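"""Mix background plates and logos into augmented training data for YOLO.

Backgrounds are trimmed with imtool.remove_white, logos are loaded from disk
(rasterizing SVGs through cairosvg when needed), and an imgaug pipeline
composites randomly transformed logos onto each background, writing the
results as images/ and labels/ in YOLO annotation format.
"""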
import os
import time
import math
import random
import csv

from io import BytesIO
import numpy as np
from cairosvg import svg2png
import cv2

import filetype
from filetype.match import image_matchers

from progress.bar import ChargingBar

import imgaug as ia
from imgaug import augmenters as iaa
from imgaug.augmentables.batches import UnnormalizedBatch

from entity import Entity
from common import defaults, mkdir
import imtool
import pipelines

BATCH_SIZE = 16            # logos fed to the augmenter per batch
PARALLEL = 20              # default number of augmentation worker processes
MIN_BACKGROUND_SIZE = 500  # default minimum background side length, in pixels

def process_bg(b, min_size):
    """Trim the white margin off a background and remap its label, if any.

    Returns the trimmed image and the rewritten annotation string (or None);
    raises when the trimmed image is smaller than min_size on either side.
    """
    imw = cv2.imread(b.path)
    im, bb = imtool.remove_white(imw)
    annot = None
    label = os.path.splitext(b.path)[0] + '.txt'
    if os.path.exists(label):
        # rewrite the label with coordinates relative to the trimmed image;
        # note that cv2 shapes are (height, width, channels)
        (wh, ww, _) = imw.shape
        (ih, iw, _) = im.shape
        es = imtool.read_centroids(label)
        lines = ''
        for e in es:
            [i, p, c] = e.values()
            [x, y, w, h] = [
                max((c.x*ww - bb.x)/iw, 0),
                max((c.y*wh - bb.y)/ih, 0),
                (c.w*ww)/iw,
                (c.h*wh)/ih
            ]
            lines += f'{int(i)} {x} {y} {w} {h}\n'
        annot = lines

    if im.shape[0] > min_size and im.shape[1] > min_size:
        return im, annot
    raise Exception(f'dropping {b.path} after remove_white => {im.shape}')

def filter_bgs(bgs, min_size):
    """Drop label files and any background that process_bg rejects."""
    ret = []
    for b in bgs:
        if b.path.endswith('txt'): continue
        try:
            img, annot = process_bg(b, min_size)
        except Exception as e:
            print(f'drop: {e}')
            continue
        ret.append((b, img, annot))
    return ret

def process(args):
    dest_images_path = os.path.join(args.dest, 'images')
    dest_labels_path = os.path.join(args.dest, 'labels')

    mkdir.make_dirs([dest_images_path, dest_labels_path])
    logo_images = []
    logo_alphas = []
    logo_labels = {}

    db = {}
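    # index entities by their bco key; logo filenames use these keys as stems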
    with open(defaults.MAIN_CSV_PATH, 'r') as f:
        reader = csv.DictReader(f)
        db = {e.bco: e for e in [Entity.from_dict(d) for d in reader]}

    background_images = []
    for d in args.background:
        background_images.extend(os.scandir(d))

    print(f'filtering {len(background_images)} background images from {args.background}')
    background_images = filter_bgs(background_images, args.minbgsize)
    assert(len(background_images))
    stats = {
        'failed': 0,
        'ok': 0
    }

    for d in os.scandir(args.logos):
        img = None
        if not d.is_file():
            stats['failed'] += 1
            continue

        try:
            if filetype.match(d.path, matchers=image_matchers):
                img = cv2.imread(d.path, cv2.IMREAD_UNCHANGED)
            else:
                png = svg2png(url=d.path)
                img = cv2.imdecode(np.asarray(bytearray(png), dtype=np.uint8), cv2.IMREAD_UNCHANGED)
            label = db[d.name.split('.')[0]].id

            if img.ndim < 3:
                raise Exception(f'bad number of dimensions: {img.ndim}')

            (h, w, c) = img.shape
            if c == 3:
                img = imtool.add_alpha(img)

            (h, w, c) = img.shape

            assert(w > 10)
            assert(h > 10)

            stats['ok'] += 1

            # split the colour planes from the alpha plane
            (b, g, r, _) = cv2.split(img)
            alpha = img[:, :, 3]/255
            bgr = cv2.merge([b, g, r])

            logo_images.append(bgr)
            # tried id(), tried __array_interface__, tried tagging, nothing works:
            # key the label lookup on the raw image bytes instead
            logo_labels.update({bgr.tobytes(): label})

            # XXX(xaiki): we pass alpha as a float32 heatmap, because imgaug is
            # pretty strict about what data it will process, and we want the
            # alpha layer to go through the same transformations as the image
            logo_alphas.append(np.dstack((alpha, alpha, alpha)).astype('float32'))

        except Exception as e:
            stats['failed'] += 1
            print(f'error loading: {d.path}: {e}')

    print(stats)

    assert(len(logo_alphas) == len(logo_images))
    print(f"will process {len(logo_images)} images on {len(background_images)} backgrounds")

    # so that we don't get a lot of the same logos on the same page.
    zipped = list(zip(logo_images, logo_alphas))
    random.shuffle(zipped)
    logo_images, logo_alphas = zip(*zipped)

    n = len(logo_images)
    batches = []
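    # make ~2 passes over the logos in BATCH_SIZE chunks; when the slice at
    # the end of the list runs short, pad it from the start (le items) so
    # every batch holds exactly BATCH_SIZE images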
    for i in range(math.floor(n*2/BATCH_SIZE)):
        s = (i*BATCH_SIZE)%n
        e = min(s + BATCH_SIZE, n)
        le = max(0, BATCH_SIZE - (e - s))

        a = logo_images[0:le] + logo_images[s:e]
        h = logo_alphas[0:le] + logo_alphas[s:e]

        assert(len(a) == BATCH_SIZE)

        batches.append(UnnormalizedBatch(images=a,heatmaps=h))

    bar = ChargingBar(f'augment ({len(logo_images)} logos {len(background_images)} bgs)',
                      max=len(batches)*len(background_images)*(BATCH_SIZE//3))
    # batches are pulled lazily from the generator, so a new batch is only
    # loaded once there is space again in the pool's output buffer
    pipeline = pipelines.HUGE

    def create_generator(lst):
        for b in lst:
            print("Loading next unaugmented batch...")
            yield b

    batches_generator = create_generator(batches)

    with pipeline.pool(processes=args.parallel, seed=1) as pool:
        batches_aug = pool.imap_batches(batches_generator, output_buffer_size=5)

        for i, batch_aug in enumerate(batches_aug):
            print(f"Requesting next augmented batch...{i}/{len(batches)}")
            for j, (d, img, annot) in enumerate(background_images):
                # work on a copy so logos don't accumulate on the shared
                # background array across batches (assuming imtool.mix_alpha
                # composites in place)
                img = img.copy()
                basename = d.name.replace('.png', f'.{i}.{j}')
                annotations = []
                if annot:
                    annotations.append(annot.rstrip())

                for k in range(math.floor(len(batch_aug.images_aug)/3)):
                    bar.next()
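                    # take every 4th augmented logo, offset by the background
                    # index, so each background sees a different subset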
                    logo_idx = (j+k*4)%len(batch_aug.images_aug)

                    orig = batch_aug.images_unaug[logo_idx]
                    label = logo_labels[orig.tobytes()]
                    logo = batch_aug.images_aug[logo_idx]

                    assert(logo.shape == orig.shape)

                    # XXX(xaiki): we take alpha from the heatmap but only use one
                    # channel; we could turn mix_alpha into mix_mask and pass all
                    # 3 channels
                    alpha = cv2.split(batch_aug.heatmaps_aug[logo_idx])

                    try:
                        bb = imtool.mix_alpha(img, logo, alpha[0],
                                              random.random(), random.random())
                        c = bb.to_centroid(img.shape)
                        annotations.append(c.to_annotation(label))
                    except AssertionError as err:
                        print(f"couldn't process {i}, {j}: {err}")
                    except Exception as err:
                        print(f'error in mix pipeline: {err}')

                try:
                    cv2.imwrite(f'{dest_images_path}/{basename}.png', img)
                    label_path = f"{dest_labels_path}/{basename}.txt"
                    with open(label_path, 'a') as f:
                        f.write('\n'.join(annotations))
                except Exception as err:
                    print(f"couldn't write image {basename}: {err}")

        bar.finish()

if __name__ == '__main__':
    import argparse
    print("✨ augmenting data")
    parser = argparse.ArgumentParser(description='mix backgrounds and logos into augmented data for YOLO')
    parser.add_argument('--logos', metavar='logos', type=str,
                        default=defaults.LOGOS_DATA_PATH,
                        help='dir containing logos')
    parser.add_argument('--background', metavar='backgrounds', type=str,
                        nargs='+',
                        default=[defaults.SCREENSHOT_PATH, defaults.FISH_PATH],
                        help='dir containing background plates')
    parser.add_argument('--dst', dest='dest', type=str,
                        default=defaults.AUGMENTED_DATA_PATH,
                        help='dest dir')
    parser.add_argument('--parallel', metavar='parallel', type=int,
                        default=PARALLEL,
                        help='number of concurrent jobs')
    parser.add_argument('--min-background-size', dest='minbgsize', type=int,
                        default=MIN_BACKGROUND_SIZE, help='minimum background size')
    args = parser.parse_args()
    process(args)
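
# Example invocation (the script name and data paths here are illustrative;
# the real defaults come from common.defaults):
#   python augment.py --logos data/logos --background data/screenshots \
#       --dst data/augmented --parallel 8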