import os
from datetime import date

import cv2
import numpy as np
import rasterio
import torch
from pyproj import Transformer
from torch.utils.data import DataLoader, Dataset
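# Per-band scale factors for the 21 Sentinel-3 OLCI channels; band b is
# multiplied by S3_OLCI_SCALE[b] after loading (see __getitem__).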
S3_OLCI_SCALE = [0.0139465, 0.0133873, 0.0121481, 0.0115198, 0.0100953, 0.0123538, 0.00879161,
                 0.00876539, 0.0095103, 0.00773378, 0.00675523, 0.0071996, 0.00749684, 0.0086512,
                 0.00526779, 0.00530267, 0.00493004, 0.00549962, 0.00502847, 0.00326378, 0.00324118]
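# CGLS-LC100 discrete land-cover class codes mapped to contiguous indices 0-22,
# used to build the 23-dim multi-hot label vector.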
LC100_CLSID = {
    0: 0,  # unknown
    20: 1,
    30: 2,
    40: 3,
    50: 4,
    60: 5,
    70: 6,
    80: 7,
    90: 8,
    100: 9,
    111: 10,
    112: 11,
    113: 12,
    114: 13,
    115: 14,
    116: 15,
    121: 16,
    122: 17,
    123: 18,
    124: 19,
    125: 20,
    126: 21,
    200: 22,  # ocean
}
class S3OLCI_LC100ClsDataset(Dataset):
    '''
    Sentinel-3 OLCI patches with CGLS-LC100 multi-label land-cover annotations.
    6908 train / 1727 test images of 96x96 pixels and 21 bands
    23-class multi-label LULC classification
    nodata value: -inf
    time series: 1-4 timestamps per location
    '''
    def __init__(self, root_dir, mode='static', split='train', meta=False):
        self.root_dir = root_dir
        self.mode = mode
        self.meta = meta
        self.img_dir = os.path.join(root_dir, split, 's3_olci')
        self.lc100_cls = os.path.join(root_dir, split, 'lc100_multilabel.csv')
        # each csv row: <dirname>,<class code>,<class code>,...
        self.fnames = []
        self.labels = []
        with open(self.lc100_cls, 'r') as f:
            for line in f:
                parts = line.strip().split(',')
                self.fnames.append(parts[0])
                self.labels.append(list(map(int, parts[1:])))
        if self.mode == 'static':
            # map each location directory to the single image used in static mode
            self.static_csv = os.path.join(root_dir, split, 'static_fnames.csv')
            self.static_img = {}
            with open(self.static_csv, 'r') as f:
                for line in f:
                    dirname, img_fname = line.strip().split(',')[:2]
                    self.static_img[dirname] = img_fname
        if self.meta:
            # acquisition dates are encoded as days since this reference date
            self.reference_date = date(1970, 1, 1)
    def __len__(self):
        return len(self.fnames)
    def __getitem__(self, idx):
        fname = self.fnames[idx]
        s3_path = os.path.join(self.img_dir, fname)
        if self.mode == 'static':
            # a single pre-selected acquisition per location
            img_fname = self.static_img[fname]
            s3_paths = [os.path.join(s3_path, img_fname)]
        else:
            # all available acquisitions for this location
            img_fnames = os.listdir(s3_path)
            s3_paths = [os.path.join(s3_path, img_fname) for img_fname in img_fnames]

        imgs = []
        img_paths = []
        meta_infos = []
        for img_path in s3_paths:
            with rasterio.open(img_path) as src:
                img = src.read()
                # resize each of the 21 bands to 96x96
                chs = []
                for b in range(21):
                    ch = cv2.resize(img[b], (96, 96), interpolation=cv2.INTER_CUBIC)
                    chs.append(ch)
                img = np.stack(chs)
                # zero out nodata (-inf) and NaN pixels
                img[~np.isfinite(img)] = 0
                # apply per-band scale factors
                for b in range(21):
                    img[b] = img[b] * S3_OLCI_SCALE[b]
                img = torch.from_numpy(img).float()

                if self.meta:
                    # centre coordinate of the patch
                    cx, cy = src.xy(src.height // 2, src.width // 2)
                    # convert to lon, lat if the raster CRS is not EPSG:4326
                    #crs_transformer = Transformer.from_crs(src.crs, 'epsg:4326')
                    #lon, lat = crs_transformer.transform(cx, cy)
                    lon, lat = cx, cy
                    # parse the acquisition date from the filename (<...>_YYYYMMDD...)
                    img_fname = os.path.basename(img_path)
                    date_str = img_fname.split('_')[1][:8]
                    date_obj = date(int(date_str[:4]), int(date_str[4:6]), int(date_str[6:8]))
                    delta = (date_obj - self.reference_date).days
                    meta_info = np.array([lon, lat, delta, 0]).astype(np.float32)
                else:
                    meta_info = np.array([np.nan, np.nan, np.nan, np.nan]).astype(np.float32)

            imgs.append(img)
            img_paths.append(img_path)
            meta_infos.append(meta_info)

        if self.mode == 'series':
            # pad to 4 images by repeating the last acquisition
            while len(imgs) < 4:
                imgs.append(img)
                img_paths.append(img_path)
                meta_infos.append(meta_info)

        # turn the list of class codes into a 23-dim multi-hot vector
        label = self.labels[idx]
        labels = torch.zeros(23)
        for l in label:
            cls_id = LC100_CLSID[l]
            labels[cls_id] = 1

        if self.mode == 'static':
            return imgs[0], meta_infos[0], labels
        elif self.mode == 'series':
            return (imgs[0], imgs[1], imgs[2], imgs[3],
                    meta_infos[0], meta_infos[1], meta_infos[2], meta_infos[3], labels)
if __name__ == '__main__':
    dataset = S3OLCI_LC100ClsDataset(root_dir='../data/downstream/cgls_lc100', mode='static', split='train', meta=True)
    dataloader = DataLoader(dataset, batch_size=64, shuffle=False, num_workers=4)
    for i, data in enumerate(dataloader):
        # data: (images [B, 21, 96, 96], meta_infos [B, 4], labels [B, 23])
        pass
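
    # A minimal sketch of the 'series' mode, assuming the same directory layout and
    # that a 'test' split exists: each sample yields four images and four metadata
    # vectors; locations with fewer than four acquisitions are padded by repeating
    # the last one.
    series_dataset = S3OLCI_LC100ClsDataset(root_dir='../data/downstream/cgls_lc100',
                                            mode='series', split='test', meta=True)
    img1, img2, img3, img4, m1, m2, m3, m4, labels = series_dataset[0]
    print(img1.shape, labels.sum())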