Datasets:
Tasks: Image Segmentation
Modalities: Image
Languages: English
Tags: Cloud Detection, Cloud Segmentation, Remote Sensing Images, Satellite Images, HRC-WHU, CloudSEN12-High
License:
XavierJiezou committed
Commit e139551 • 1 Parent(s): 793caea
Create give_colors_to_mask.py
Files changed: give_colors_to_mask.py (+102 -0)
give_colors_to_mask.py
ADDED
@@ -0,0 +1,102 @@
import os
import numpy as np
from PIL import Image
from tqdm import tqdm
from concurrent.futures import ThreadPoolExecutor

# Define the function to retrieve the color palette for a given dataset
def get_palette(dataset_name: str):
    if dataset_name in ["cloudsen12_high_l1c", "cloudsen12_high_l2a"]:
        return [79, 253, 199, 77, 2, 115, 251, 255, 41, 221, 53, 223]
    if dataset_name == "l8_biome":
        return [79, 253, 199, 221, 53, 223, 251, 255, 41, 77, 2, 115]
    if dataset_name in ["gf12ms_whu_gf1", "gf12ms_whu_gf2", "hrc_whu"]:
        return [79, 253, 199, 77, 2, 115]
    raise ValueError(f"dataset_name not supported: {dataset_name}")

# Function to apply the color palette to a mask
def give_colors_to_mask(mask: np.ndarray, colors=None) -> Image.Image:
    """Convert a mask to a colorized version using the specified palette."""
    im = Image.fromarray(mask.astype(np.uint8)).convert("P")
    im.putpalette(colors)
    return im

# Function to process a single file
def process_file(file_path, palette):
    try:
        # Load the mask
        mask = np.array(Image.open(file_path))

        # Apply the color palette
        colored_mask = give_colors_to_mask(mask, palette)

        # Save the colored mask, overwriting the original file
        colored_mask.save(file_path)
        return True
    except Exception as e:
        print(f"Error processing {file_path}: {e}")
        return False

# Main processing function for a dataset
def process_dataset(dataset_name, base_root, progress_bar):
    ann_dir = os.path.join(base_root, dataset_name, "ann_dir")
    if not os.path.exists(ann_dir):
        print(f"Annotation directory does not exist for {dataset_name}: {ann_dir}")
        return

    # Get the color palette for this dataset
    palette = get_palette(dataset_name)

    # Gather all files to process
    files_to_process = []
    for split in ["train", "val", "test"]:
        split_dir = os.path.join(ann_dir, split)
        if not os.path.exists(split_dir):
            print(f"Split directory does not exist for {dataset_name}: {split_dir}")
            continue

        # Add all png files in the directory to the list
        for file_name in os.listdir(split_dir):
            if file_name.endswith(".png"):
                files_to_process.append(os.path.join(split_dir, file_name))

    # Multi-threaded processing
    with ThreadPoolExecutor() as executor:
        results = list(tqdm(
            executor.map(lambda f: process_file(f, palette), files_to_process),
            total=len(files_to_process),
            desc=f"Processing {dataset_name}",
            leave=False
        ))

    # Update the progress bar
    progress_bar.update(len(files_to_process))

    print(f"{dataset_name}: Processed {sum(results)} files out of {len(files_to_process)}.")

# Define the root directory and datasets
base_root = "data"  # Replace with your datasets' root directory
dataset_names = [
    "cloudsen12_high_l1c",
    "cloudsen12_high_l2a",
    "gf12ms_whu_gf1",
    "gf12ms_whu_gf2",
    "hrc_whu",
    "l8_biome"
]

# Main script
if __name__ == "__main__":
    # Calculate total number of files for all datasets
    total_files = 0
    for dataset_name in dataset_names:
        ann_dir = os.path.join(base_root, dataset_name, "ann_dir")
        for split in ["train", "val", "test"]:
            split_dir = os.path.join(ann_dir, split)
            if os.path.exists(split_dir):
                total_files += len([f for f in os.listdir(split_dir) if f.endswith(".png")])

    # Create a progress bar
    with tqdm(total=total_files, desc="Overall Progress") as progress_bar:
        for dataset_name in dataset_names:
            process_dataset(dataset_name, base_root, progress_bar)
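For reference, a minimal sketch of what the palette mechanism in give_colors_to_mask does, separate from the committed file. It builds a toy two-class mask and applies the hrc_whu palette from get_palette above; the toy mask and the output filename are hypothetical illustrations, not part of the commit:

import numpy as np
from PIL import Image

# Hypothetical toy example: a 4x4 binary mask (0 = clear, 1 = cloud)
mask = np.array([
    [0, 0, 1, 1],
    [0, 1, 1, 0],
    [1, 1, 0, 0],
    [1, 0, 0, 0],
], dtype=np.uint8)

# hrc_whu palette from the script: RGB (79, 253, 199) for class 0,
# RGB (77, 2, 115) for class 1, given as one flat [R, G, B, R, G, B, ...] list
palette = [79, 253, 199, 77, 2, 115]

im = Image.fromarray(mask).convert("P")  # "P" = 8-bit indexed (paletted) mode
im.putpalette(palette)                   # attach the flat RGB palette to the indices
im.save("toy_mask.png")                  # written as an indexed-color PNG

Run as-is, the script expects a data/<dataset_name>/ann_dir/<split>/*.png layout relative to the working directory (base_root can be edited) and overwrites each mask in place, so backing up ann_dir before running is prudent.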