wangyi111 committed
Commit b1ac584 · verified · 1 Parent(s): ba8be35

Upload 2 files

lc100_s3olci/senbench_lc100s3cls_wrapper.py ADDED
@@ -0,0 +1,256 @@
+ import kornia.augmentation as K
+ import torch
+ from torchgeo.datasets.geo import NonGeoDataset
+ import os
+ from collections.abc import Callable, Sequence
+ from torch import Tensor
+ import numpy as np
+ import rasterio
+ import cv2
+ from pyproj import Transformer
+ from datetime import date
+ from typing import TypeAlias, ClassVar
+ import pathlib
+
+ import logging
+
+ logging.getLogger("rasterio").setLevel(logging.ERROR)
+ Path: TypeAlias = str | os.PathLike[str]
+
+ class SenBenchLC100S3Cls(NonGeoDataset):
+     url = None
+     #base_dir = 'all_imgs'
+     splits = ('train', 'val', 'test')
+     label_filenames = {
+         'train': 'lc100_multilabel-train.csv',
+         'val': 'lc100_multilabel-val.csv',
+         'test': 'lc100_multilabel-test.csv',
+     }
+     static_filenames = {
+         'train': 'static_fnames-train.csv',
+         'val': 'static_fnames-val.csv',
+         'test': 'static_fnames-test.csv',
+     }
+     all_band_names = (
+         'Oa01_radiance', 'Oa02_radiance', 'Oa03_radiance', 'Oa04_radiance', 'Oa05_radiance', 'Oa06_radiance', 'Oa07_radiance',
+         'Oa08_radiance', 'Oa09_radiance', 'Oa10_radiance', 'Oa11_radiance', 'Oa12_radiance', 'Oa13_radiance', 'Oa14_radiance',
+         'Oa15_radiance', 'Oa16_radiance', 'Oa17_radiance', 'Oa18_radiance', 'Oa19_radiance', 'Oa20_radiance', 'Oa21_radiance',
+     )
+     all_band_scale = (
+         0.0139465,0.0133873,0.0121481,0.0115198,0.0100953,0.0123538,0.00879161,
+         0.00876539,0.0095103,0.00773378,0.00675523,0.0071996,0.00749684,0.0086512,
+         0.00526779,0.00530267,0.00493004,0.00549962,0.00502847,0.00326378,0.00324118)
+     rgb_bands = ('Oa08_radiance', 'Oa06_radiance', 'Oa04_radiance')
+
+     LC100_CLSID = {
+         0: 0,  # unknown
+         20: 1,
+         30: 2,
+         40: 3,
+         50: 4,
+         60: 5,
+         70: 6,
+         80: 7,
+         90: 8,
+         100: 9,
+         111: 10,
+         112: 11,
+         113: 12,
+         114: 13,
+         115: 14,
+         116: 15,
+         121: 16,
+         122: 17,
+         123: 18,
+         124: 19,
+         125: 20,
+         126: 21,
+         200: 22,  # ocean
+     }
+
+
+     def __init__(
+         self,
+         root: Path = 'data',
+         split: str = 'train',
+         bands: Sequence[str] = all_band_names,
+         mode = 'static',
+         transforms: Callable[[dict[str, Tensor]], dict[str, Tensor]] | None = None,
+         download: bool = False,
+     ) -> None:
+
+         self.root = root
+         self.transforms = transforms
+         self.download = download
+         #self.checksum = checksum
+
+         assert split in ['train', 'val', 'test']
+
+         self.bands = bands
+         self.band_indices = [(self.all_band_names.index(b)+1) for b in bands if b in self.all_band_names]
+
+         self.mode = mode
+         self.img_dir = os.path.join(self.root, 's3_olci')
+         self.lc100_cls = os.path.join(self.root, self.label_filenames[split])
+
+         self.pids = []
+         self.labels = []
+         with open(self.lc100_cls, 'r') as f:
+             lines = f.readlines()
+             for line in lines:
+                 self.pids.append(line.strip().split(',')[0])
+                 self.labels.append(list(map(int, line.strip().split(',')[1:])))
+
+         if self.mode == 'static':
+             self.static_csv = os.path.join(self.root, self.static_filenames[split])
+             with open(self.static_csv, 'r') as f:
+                 lines = f.readlines()
+                 self.static_img = {}
+                 for line in lines:
+                     pid = line.strip().split(',')[0]
+                     img_fname = line.strip().split(',')[1]
+                     self.static_img[pid] = img_fname
+
+
+         self.reference_date = date(1970, 1, 1)
+         self.patch_area = (16*10/1000)**2 # patchsize 16 pix, gsd 10m
+
+     def __len__(self):
+         return len(self.pids)
+
+     def __getitem__(self, index):
+
+         images, meta_infos = self._load_image(index)
+         #meta_info = np.array([coord[0], coord[1], np.nan, self.patch_area]).astype(np.float32)
+         label = self._load_target(index)
+         if self.mode == 'static':
+             sample = {'image': images[0], 'label': label, 'meta': meta_infos[0]}
+         elif self.mode == 'series':
+             sample = {'image': images, 'label': label, 'meta': meta_infos}
+
+         if self.transforms is not None:
+             sample = self.transforms(sample)
+
+         return sample
+
+
+     def _load_image(self, index):
+
+         pid = self.pids[index]
+         s3_path = os.path.join(self.img_dir, pid)
+         if self.mode == 'static':
+             img_fname = self.static_img[pid]
+             s3_paths = [os.path.join(s3_path, img_fname)]
+         else:
+             img_fnames = os.listdir(s3_path)
+             s3_paths = []
+             for img_fname in img_fnames:
+                 s3_paths.append(os.path.join(s3_path, img_fname))
+
+         imgs = []
+         img_paths = []
+         meta_infos = []
+         for img_path in s3_paths:
+             with rasterio.open(img_path) as src:
+                 img = src.read()
+                 img[np.isnan(img)] = 0
+                 chs = []
+                 for b in range(21):
+                     ch = img[b]*self.all_band_scale[b]
+                     ch = cv2.resize(ch, (96,96), interpolation=cv2.INTER_CUBIC)
+                     chs.append(ch)
+                 img = np.stack(chs)
+                 img = torch.from_numpy(img).float()
+
+                 # get lon, lat
+                 cx,cy = src.xy(src.height // 2, src.width // 2)
+                 if src.crs.to_string() != 'EPSG:4326':
+                     # convert to lon, lat
+                     crs_transformer = Transformer.from_crs(src.crs, 'epsg:4326', always_xy=True)
+                     lon, lat = crs_transformer.transform(cx,cy)
+                 else:
+                     lon, lat = cx, cy
+                 # get time
+                 img_fname = os.path.basename(img_path)
+                 date_str = img_fname.split('_')[1][:8]
+                 date_obj = date(int(date_str[:4]), int(date_str[4:6]), int(date_str[6:8]))
+                 delta = (date_obj - self.reference_date).days
+                 meta_info = np.array([lon, lat, delta, self.patch_area]).astype(np.float32)
+                 meta_info = torch.from_numpy(meta_info)
+
+                 imgs.append(img)
+                 img_paths.append(img_path)
+                 meta_infos.append(meta_info)
+
+         if self.mode == 'series':
+             # pad to 4 images if less than 4
+             while len(imgs) < 4:
+                 imgs.append(img)
+                 img_paths.append(img_path)
+                 meta_infos.append(meta_info)
+
+         return imgs, meta_infos
+
+     def _load_target(self, index):
+
+         label = self.labels[index]
+         labels = torch.zeros(23)
+         # turn into one-hot
+         for l in label:
+             cls_id = self.LC100_CLSID[l]
+             labels[cls_id] = 1
+
+         return labels
+
+
+ class ClsDataAugmentation(torch.nn.Module):
+
+     def __init__(self, split, size, bands):
+         super().__init__()
+
+         mean = torch.Tensor([0.0])
+         std = torch.Tensor([1.0])
+
+         if split == "train":
+             self.transform = torch.nn.Sequential(
+                 K.Normalize(mean=mean, std=std),
+                 K.Resize(size=size, align_corners=True),
+                 K.RandomHorizontalFlip(p=0.5),
+                 K.RandomVerticalFlip(p=0.5),
+             )
+         else:
+             self.transform = torch.nn.Sequential(
+                 K.Normalize(mean=mean, std=std),
+                 K.Resize(size=size, align_corners=True),
+             )
+
+     @torch.no_grad()
+     def forward(self, batch: dict[str, Tensor]):
+         """Torchgeo returns a dictionary with 'image' and 'label' keys, but the engine expects a tuple."""
+         x_out = self.transform(batch["image"]).squeeze(0)
+         return x_out, batch["label"], batch["meta"]
+
+
+ class SenBenchLC100S3ClsDataset:
+     def __init__(self, config):
+         self.dataset_config = config
+         self.img_size = (config.image_resolution, config.image_resolution)
+         self.root_dir = config.data_path
+         self.bands = config.band_names
+         self.mode = config.mode
+
+     def create_dataset(self):
+         train_transform = ClsDataAugmentation(split="train", size=self.img_size, bands=self.bands)
+         eval_transform = ClsDataAugmentation(split="test", size=self.img_size, bands=self.bands)
+
+         dataset_train = SenBenchLC100S3Cls(
+             root=self.root_dir, split="train", bands=self.bands, mode=self.mode, transforms=train_transform
+         )
+         dataset_val = SenBenchLC100S3Cls(
+             root=self.root_dir, split="val", bands=self.bands, mode=self.mode, transforms=eval_transform
+         )
+         dataset_test = SenBenchLC100S3Cls(
+             root=self.root_dir, split="test", bands=self.bands, mode=self.mode, transforms=eval_transform
+         )
+
+         return dataset_train, dataset_val, dataset_test
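
A minimal usage sketch for the classification wrapper above, assuming a lightweight config object that exposes the four attributes SenBenchLC100S3ClsDataset reads (image_resolution, data_path, band_names, mode) and a dataset root laid out as the code expects (an s3_olci/ image folder plus the lc100_multilabel-*.csv and static_fnames-*.csv split files); the path and values below are illustrative assumptions only:

from types import SimpleNamespace
from torch.utils.data import DataLoader

# Hypothetical config: any object with these four attributes works.
config = SimpleNamespace(
    image_resolution=96,                            # resize target passed to ClsDataAugmentation
    data_path='data/lc100_s3olci',                  # assumed root containing s3_olci/ and the CSV splits
    band_names=SenBenchLC100S3Cls.all_band_names,   # all 21 OLCI radiance bands
    mode='static',                                  # one image per patch; 'series' returns up to 4
)

train_ds, val_ds, test_ds = SenBenchLC100S3ClsDataset(config).create_dataset()
loader = DataLoader(train_ds, batch_size=8, shuffle=True)

# ClsDataAugmentation.forward turns each sample dict into an (image, label, meta) tuple, so a batch
# unpacks as roughly (8, 21, 96, 96) images, (8, 23) multi-hot labels, and (8, 4) lon/lat/day/area metas.
images, labels, meta = next(iter(loader))
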
lc100_s3olci/senbench_lc100s3seg_wrapper.py ADDED
@@ -0,0 +1,260 @@
+ import kornia as K
+ import torch
+ from torchgeo.datasets.geo import NonGeoDataset
+ import os
+ from collections.abc import Callable, Sequence
+ from torch import Tensor
+ import numpy as np
+ import rasterio
+ import cv2
+ from pyproj import Transformer
+ from datetime import date
+ from typing import TypeAlias, ClassVar
+ import pathlib
+
+ import logging
+
+ logging.getLogger("rasterio").setLevel(logging.ERROR)
+ Path: TypeAlias = str | os.PathLike[str]
+
+ class SenBenchLC100S3Seg(NonGeoDataset):
+     url = None
+     #base_dir = 'all_imgs'
+     splits = ('train', 'val', 'test')
+     # label_filenames = {
+     #     'train': 'lc100_multilabel-train.csv',
+     #     'val': 'lc100_multilabel-val.csv',
+     #     'test': 'lc100_multilabel-test.csv',
+     # }
+     static_filenames = {
+         'train': 'static_fnames-train.csv',
+         'val': 'static_fnames-val.csv',
+         'test': 'static_fnames-test.csv',
+     }
+     all_band_names = (
+         'Oa01_radiance', 'Oa02_radiance', 'Oa03_radiance', 'Oa04_radiance', 'Oa05_radiance', 'Oa06_radiance', 'Oa07_radiance',
+         'Oa08_radiance', 'Oa09_radiance', 'Oa10_radiance', 'Oa11_radiance', 'Oa12_radiance', 'Oa13_radiance', 'Oa14_radiance',
+         'Oa15_radiance', 'Oa16_radiance', 'Oa17_radiance', 'Oa18_radiance', 'Oa19_radiance', 'Oa20_radiance', 'Oa21_radiance',
+     )
+     all_band_scale = (
+         0.0139465,0.0133873,0.0121481,0.0115198,0.0100953,0.0123538,0.00879161,
+         0.00876539,0.0095103,0.00773378,0.00675523,0.0071996,0.00749684,0.0086512,
+         0.00526779,0.00530267,0.00493004,0.00549962,0.00502847,0.00326378,0.00324118)
+     rgb_bands = ('Oa08_radiance', 'Oa06_radiance', 'Oa04_radiance')
+
+     LC100_CLSID = {
+         0: 0,  # unknown
+         20: 1,
+         30: 2,
+         40: 3,
+         50: 4,
+         60: 5,
+         70: 6,
+         80: 7,
+         90: 8,
+         100: 9,
+         111: 10,
+         112: 11,
+         113: 12,
+         114: 13,
+         115: 14,
+         116: 15,
+         121: 16,
+         122: 17,
+         123: 18,
+         124: 19,
+         125: 20,
+         126: 21,
+         200: 22,  # ocean
+     }
+
+
+     def __init__(
+         self,
+         root: Path = 'data',
+         split: str = 'train',
+         bands: Sequence[str] = all_band_names,
+         mode = 'static',
+         transforms: Callable[[dict[str, Tensor]], dict[str, Tensor]] | None = None,
+         download: bool = False,
+     ) -> None:
+
+         self.root = root
+         self.transforms = transforms
+         self.download = download
+         #self.checksum = checksum
+
+         assert split in ['train', 'val', 'test']
+
+         self.bands = bands
+         self.band_indices = [(self.all_band_names.index(b)+1) for b in bands if b in self.all_band_names]
+
+         self.mode = mode
+         self.img_dir = os.path.join(self.root, 's3_olci')
+         #self.lc100_cls = os.path.join(self.root, self.label_filenames[split])
+         self.label_dir = os.path.join(self.root, 'lc100')
+
+         if self.mode == 'static':
+             self.static_csv = os.path.join(self.root, self.static_filenames[split])
+             with open(self.static_csv, 'r') as f:
+                 lines = f.readlines()
+                 self.static_img = {}
+                 for line in lines:
+                     pid = line.strip().split(',')[0]
+                     img_fname = line.strip().split(',')[1]
+                     self.static_img[pid] = img_fname
+
+             self.pids = list(self.static_img.keys())
+
+         self.reference_date = date(1970, 1, 1)
+         self.patch_area = (16*10/1000)**2 # patchsize 16 pix, gsd 10m
+
+     def __len__(self):
+         return len(self.pids)
+
+     def __getitem__(self, index):
+
+         images, meta_infos = self._load_image(index)
+         #meta_info = np.array([coord[0], coord[1], np.nan, self.patch_area]).astype(np.float32)
+         label = self._load_target(index)
+         if self.mode == 'static':
+             sample = {'image': images[0], 'mask': label, 'meta': meta_infos[0]}
+         elif self.mode == 'series':
+             sample = {'image': images, 'mask': label, 'meta': meta_infos}
+
+         if self.transforms is not None:
+             sample = self.transforms(sample)
+
+         return sample
+
+
+     def _load_image(self, index):
+
+         pid = self.pids[index]
+         s3_path = os.path.join(self.img_dir, pid)
+         if self.mode == 'static':
+             img_fname = self.static_img[pid]
+             s3_paths = [os.path.join(s3_path, img_fname)]
+         else:
+             img_fnames = os.listdir(s3_path)
+             s3_paths = []
+             for img_fname in img_fnames:
+                 s3_paths.append(os.path.join(s3_path, img_fname))
+
+         imgs = []
+         img_paths = []
+         meta_infos = []
+         for img_path in s3_paths:
+             with rasterio.open(img_path) as src:
+                 img = src.read()
+                 img[np.isnan(img)] = 0
+                 chs = []
+                 for b in range(21):
+                     ch = img[b]*self.all_band_scale[b]
+                     #ch = cv2.resize(ch, (96,96), interpolation=cv2.INTER_CUBIC)
+                     ch = cv2.resize(ch, (282,282), interpolation=cv2.INTER_CUBIC)
+                     chs.append(ch)
+                 img = np.stack(chs)
+                 img = torch.from_numpy(img).float()
+
+                 # get lon, lat
+                 cx,cy = src.xy(src.height // 2, src.width // 2)
+                 if src.crs.to_string() != 'EPSG:4326':
+                     # convert to lon, lat
+                     crs_transformer = Transformer.from_crs(src.crs, 'epsg:4326', always_xy=True)
+                     lon, lat = crs_transformer.transform(cx,cy)
+                 else:
+                     lon, lat = cx, cy
+                 # get time
+                 img_fname = os.path.basename(img_path)
+                 date_str = img_fname.split('_')[1][:8]
+                 date_obj = date(int(date_str[:4]), int(date_str[4:6]), int(date_str[6:8]))
+                 delta = (date_obj - self.reference_date).days
+                 meta_info = np.array([lon, lat, delta, self.patch_area]).astype(np.float32)
+                 meta_info = torch.from_numpy(meta_info)
+
+                 imgs.append(img)
+                 img_paths.append(img_path)
+                 meta_infos.append(meta_info)
+
+         if self.mode == 'series':
+             # pad to 4 images if less than 4
+             while len(imgs) < 4:
+                 imgs.append(img)
+                 img_paths.append(img_path)
+                 meta_infos.append(meta_info)
+
+         return imgs, meta_infos
+
+     def _load_target(self, index):
+
+         pid = self.pids[index]
+         label_path = os.path.join(self.label_dir, pid+'.tif')
+
+         with rasterio.open(label_path) as src:
+             label = src.read(1)
+             label = cv2.resize(label, (282,282), interpolation=cv2.INTER_NEAREST) # 0-650
+             label_new = np.zeros_like(label)
+             for k,v in self.LC100_CLSID.items():
+                 label_new[label==k] = v
+             labels = torch.from_numpy(label_new.astype('int64'))
+
+         return labels
+
+
+
+ class SegDataAugmentation(torch.nn.Module):
+     def __init__(self, split, size):
+         super().__init__()
+
+         mean = torch.Tensor([0.0])
+         std = torch.Tensor([1.0])
+
+         self.norm = K.augmentation.Normalize(mean=mean, std=std)
+
+         if split == "train":
+             self.transform = K.augmentation.AugmentationSequential(
+                 K.augmentation.Resize(size=size, align_corners=True),
+                 K.augmentation.RandomRotation(degrees=90, p=0.5, align_corners=True),
+                 K.augmentation.RandomHorizontalFlip(p=0.5),
+                 K.augmentation.RandomVerticalFlip(p=0.5),
+                 data_keys=["input", "mask"],
+             )
+         else:
+             self.transform = K.augmentation.AugmentationSequential(
+                 K.augmentation.Resize(size=size, align_corners=True),
+                 data_keys=["input", "mask"],
+             )
+
+     @torch.no_grad()
+     def forward(self, batch: dict[str, Tensor]):
+         """Torchgeo returns a dictionary with 'image' and 'mask' keys, but the engine expects a tuple."""
+         x, mask = batch["image"], batch["mask"]
+         x = self.norm(x)
+         x_out, mask_out = self.transform(x, mask)
+         return x_out.squeeze(0), mask_out.squeeze(0).squeeze(0), batch["meta"]
+
+
+ class SenBenchLC100S3SegDataset:
+     def __init__(self, config):
+         self.dataset_config = config
+         self.img_size = (config.image_resolution, config.image_resolution)
+         self.root_dir = config.data_path
+         self.bands = config.band_names
+         self.mode = config.mode
+
+     def create_dataset(self):
+         train_transform = SegDataAugmentation(split="train", size=self.img_size)
+         eval_transform = SegDataAugmentation(split="test", size=self.img_size)
+
+         dataset_train = SenBenchLC100S3Seg(
+             root=self.root_dir, split="train", bands=self.bands, mode=self.mode, transforms=train_transform
+         )
+         dataset_val = SenBenchLC100S3Seg(
+             root=self.root_dir, split="val", bands=self.bands, mode=self.mode, transforms=eval_transform
+         )
+         dataset_test = SenBenchLC100S3Seg(
+             root=self.root_dir, split="test", bands=self.bands, mode=self.mode, transforms=eval_transform
+         )
+
+         return dataset_train, dataset_val, dataset_test
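
A similar sketch for the segmentation wrapper, shown here by inspecting the raw dataset without transforms; the root path is an assumption, and the layout implied by the code above is s3_olci/<patch_id>/ image folders, lc100/<patch_id>.tif label rasters, and the static_fnames-*.csv split files:

# Raw dataset, no augmentation pipeline attached.
ds = SenBenchLC100S3Seg(root='data/lc100_s3olci', split='val', mode='static')
sample = ds[0]
# sample['image']: (21, 282, 282) float tensor of scaled OLCI radiances
# sample['mask']:  (282, 282) int64 tensor of LC100 class ids remapped to 0..22
# sample['meta']:  (4,) float tensor of [lon, lat, days since 1970-01-01, patch_area]

# Building the three splits with SegDataAugmentation applied mirrors the classification wrapper:
from types import SimpleNamespace
config = SimpleNamespace(image_resolution=282, data_path='data/lc100_s3olci',
                         band_names=SenBenchLC100S3Seg.all_band_names, mode='static')
train_ds, val_ds, test_ds = SenBenchLC100S3SegDataset(config).create_dataset()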