import os
import time
import math
import random
import csv

from io import BytesIO
import numpy as np
from cairosvg import svg2png
import cv2

import filetype
from filetype.match import image_matchers

from progress.bar import ChargingBar

import imgaug as ia
from imgaug import augmenters as iaa
from imgaug.augmentables.batches import UnnormalizedBatch

from entity import Entity
from common import defaults, mkdir
import imtool
import pipelines

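# number of logo images fed to imgaug in each augmentation batch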
BATCH_SIZE = 16

def process(args):
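    """Composite augmented logos onto background plates and write
    YOLO-format images and labels under args.dest."""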
    dest_images_path = os.path.join(args.dest, 'images')
    dest_labels_path = os.path.join(args.dest, 'labels')

    mkdir.make_dirs([dest_images_path, dest_labels_path])
    logo_images = []
    logo_alphas = []
    logo_labels = {}

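    # index entities by their bco code; logo filenames are expected to be '<bco>.<ext>'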
    db = {}
    with open(defaults.MAIN_CSV_PATH, 'r') as f:
        reader = csv.DictReader(f)
        db = {e.bco: e for e in [Entity.from_dict(d) for d in reader]}

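    # every background plate is reused for every augmented batch below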
    background_images = list(os.scandir(args.backgrounds))
    assert(len(background_images))

    stats = {
        'failed': 0,
        'ok': 0
    }

    for d in os.scandir(args.logos):
        img = None
        if not d.is_file():
            stats['failed'] += 1
            continue

        try:
            if filetype.match(d.path, matchers=image_matchers):
                img = cv2.imread(d.path, cv2.IMREAD_UNCHANGED)
            else:
                png = svg2png(url=d.path)
                img = cv2.imdecode(np.asarray(bytearray(png), dtype=np.uint8), cv2.IMREAD_UNCHANGED)
            label = db[d.name.split('.')[0]].id

            if img.ndim < 3:
                # grayscale images can't be split into BGR(A) channels;
                # bail out to the except handler below
                raise ValueError(f'bad number of dimensions: {img.ndim}')

            (h, w, c) = img.shape
            if c == 3:
                img = imtool.add_alpha(img)

            img = imtool.remove_white(img)
            (h, w, c) = img.shape

            assert(w > 10)
            assert(h > 10)

            stats['ok'] += 1

            (b, g, r, _) = cv2.split(img)
            alpha = img[:, :, 3] / 255
            # don't reuse `d` here: that would shadow the DirEntry that the
            # except clause below still needs for d.path
            bgr = cv2.merge([b, g, r])

            logo_images.append(bgr)
            # numpy arrays aren't hashable, so key the label map by the raw
            # image bytes (tried id(), __array_interface__ and tagging; nothing works)
            logo_labels.update({bgr.tobytes(): label})

            # XXX(xaiki): we pass alpha as a float32 heatmap, because imgaug is
            # pretty strict about what data it will process, and we want the
            # alpha layer to go through the same transformations as the original
            logo_alphas.append(np.dstack((alpha, alpha, alpha)).astype('float32'))

        except Exception as e:
            stats['failed'] += 1
            print(f'error loading: {d.path}: {e}')

    print(stats)
    #print(len(logo_alphas), len(logo_images), len(logo_labels))
    assert(len(logo_alphas) == len(logo_images))

    # shuffle so that we don't get a lot of the same logos on the same page
    zipped = list(zip(logo_images, logo_alphas))
    random.shuffle(zipped)
    logo_images, logo_alphas = zip(*zipped)

    n = len(logo_images)
    batches = []
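    # build wrap-around batches over the shuffled list: when the tail slice
    # [s:e] comes up short, pad with images from the head so every batch
    # carries exactly BATCH_SIZE entries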
    for i in range(n * 2 // BATCH_SIZE):
        s = (i*BATCH_SIZE)%n
        e = min(s + BATCH_SIZE, n)
        le = max(0, BATCH_SIZE - (e - s))

        a = logo_images[0:le] + logo_images[s:e]
        h = logo_alphas[0:le] + logo_alphas[s:e]

        assert(len(a) == BATCH_SIZE)

        batches.append(UnnormalizedBatch(images=a, heatmaps=h))

    # bar.next() fires once per (batch, background, logo-slot) iteration below
    bar = ChargingBar('augment', max=len(batches) * len(background_images) * (BATCH_SIZE // 3))
    # run the heavy augmentation pipeline in a multiprocessing pool; batches
    # are only loaded once there is space again in the output buffer
    pipeline = pipelines.HUGE

    def create_generator(lst):
        for b in lst:
            print(f"Loading next unaugmented batch...")
            yield b

    batches_generator = create_generator(batches)

    with pipeline.pool(processes=-1, seed=1) as pool:
        batches_aug = pool.imap_batches(batches_generator, output_buffer_size=5)

        print(f"Requesting next augmented batch...0/{len(batches)}")
        for i, batch_aug in enumerate(batches_aug):
            idx = list(range(len(batch_aug.images_aug)))
            random.shuffle(idx)
            for j, d in enumerate(background_images):
                try:
                    img = imtool.remove_white(cv2.imread(d.path))
                except Exception:
                    # the bare `next` here was a no-op (it referenced the builtin
                    # without calling it), leaking the previous iteration's img
                    print(f"couldn't remove white from {d.path}, skipping")
                    continue

                basename = d.name.replace('.png', '') + f'.{i}.{j}'

                annotations = []
                for k in range(len(batch_aug.images_aug) // 3):
                    bar.next()
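                    # stride through the augmented batch, offset by the
                    # background index, so each page gets a different subset of logos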
                    logo_idx = (j+k*4)%len(batch_aug.images_aug)

                    orig = batch_aug.images_unaug[logo_idx]
                    label = logo_labels[orig.tobytes()]
                    logo = batch_aug.images_aug[logo_idx]

                    assert(logo.shape == orig.shape)

                    # XXX(xaiki): we get alpha from the heatmap but only use one channel;
                    # we could turn mix_alpha into mix_mask and pass all 3 channels
                    alpha = cv2.split(batch_aug.heatmaps_aug[logo_idx])

                    try:
                        bb = imtool.mix_alpha(img, logo, alpha[0],
                                              random.random(), random.random())
                        c = bb.to_centroid(img.shape)
                        annotations.append(c.to_anotation(label))
                    except AssertionError as err:
                        print(f'couldnt process {i}, {j}: {err}')
                    except Exception as err:
                        print(f'error in mix pipeline: {err}')

                try:
                    cv2.imwrite(f'{dest_images_path}/{basename}.png', img)
                    label_path = f"{dest_labels_path}/{basename}.txt"
                    with open(label_path, 'a') as f:
                        f.write('\n'.join(annotations))
                except Exception as err:
                    print(f"couldn't write image {basename}: {err}")

            if i < len(batches) - 1:
                print(f"Requesting next augmented batch...{i + 1}/{len(batches)}")
        bar.finish()

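# example invocation (script name and paths are illustrative; all flags fall
# back to the defaults module when omitted):
#   python augment.py --logos data/logos --backgrounds data/screenshots --dst data/augmented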
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(description='mix backgrounds and logos into augmented data for YOLO')
    parser.add_argument('--logos', metavar='logos', type=str,
                        default=defaults.LOGOS_DATA_PATH,
                        help='dir containing logos')
    parser.add_argument('--backgrounds', metavar='backgrounds', type=str,
                        default=defaults.SCREENSHOT_PATH,
                        help='dir containing background plates')
    parser.add_argument('--dst', dest='dest', type=str,
                        default=defaults.AUGMENTED_DATA_PATH,
                        help='dest dir')

    args = parser.parse_args()
    process(args)