brayden-gg committed on
Commit
b65c5e3
1 Parent(s): eca1306

added files

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. DataLoader.py +278 -0
  2. SynthesisNetwork.py +0 -0
  3. __init__.py +0 -0
  4. app.py +187 -0
  5. config/GlobalVariables.py +5 -0
  6. config/__init__.py +0 -0
  7. config/__pycache__/GlobalVariables.cpython-38.pyc +0 -0
  8. config/__pycache__/GlobalVariables.cpython-39.pyc +0 -0
  9. config/__pycache__/__init__.cpython-38.pyc +0 -0
  10. config/__pycache__/__init__.cpython-39.pyc +0 -0
  11. convenience.py +555 -0
  12. data/writers/120/0.npy +3 -0
  13. data/writers/120/1.npy +3 -0
  14. data/writers/120/10.npy +3 -0
  15. data/writers/120/100.npy +3 -0
  16. data/writers/120/101.npy +3 -0
  17. data/writers/120/102.npy +3 -0
  18. data/writers/120/103.npy +3 -0
  19. data/writers/120/104.npy +3 -0
  20. data/writers/120/105.npy +3 -0
  21. data/writers/120/106.npy +3 -0
  22. data/writers/120/107.npy +3 -0
  23. data/writers/120/108.npy +3 -0
  24. data/writers/120/109.npy +3 -0
  25. data/writers/120/11.npy +3 -0
  26. data/writers/120/110.npy +3 -0
  27. data/writers/120/111.npy +3 -0
  28. data/writers/120/112.npy +3 -0
  29. data/writers/120/113.npy +3 -0
  30. data/writers/120/114.npy +3 -0
  31. data/writers/120/115.npy +3 -0
  32. data/writers/120/116.npy +3 -0
  33. data/writers/120/117.npy +3 -0
  34. data/writers/120/118.npy +3 -0
  35. data/writers/120/119.npy +3 -0
  36. data/writers/120/12.npy +3 -0
  37. data/writers/120/120.npy +3 -0
  38. data/writers/120/121.npy +3 -0
  39. data/writers/120/122.npy +3 -0
  40. data/writers/120/123.npy +3 -0
  41. data/writers/120/124.npy +3 -0
  42. data/writers/120/125.npy +3 -0
  43. data/writers/120/126.npy +3 -0
  44. data/writers/120/127.npy +3 -0
  45. data/writers/120/128.npy +3 -0
  46. data/writers/120/129.npy +3 -0
  47. data/writers/120/13.npy +3 -0
  48. data/writers/120/130.npy +3 -0
  49. data/writers/120/131.npy +3 -0
  50. data/writers/120/132.npy +3 -0
DataLoader.py ADDED
@@ -0,0 +1,278 @@
+ import os
+ import numpy as np
+ import torch
+ import random
+ from PIL import Image, ImageDraw, ImageFont
+ import pickle
+ from config.GlobalVariables import *
+
+ np.random.seed(0)
+
+
+ class DataLoader():
+     def __init__(self, num_writer=2, num_samples=5, divider=10.0, datadir='./data/writers'):
+         self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
+         self.num_writer = num_writer
+         self.num_samples = num_samples
+         self.divider = divider
+         self.datadir = datadir
+         print('self.datadir : ', self.datadir)
+         self.total_writers = len(os.listdir(datadir))
+
+     def next_batch(self, TYPE='TRAIN', uid=-1, tids=[]):
+         all_sentence_level_stroke_in = []
+         all_sentence_level_stroke_out = []
+         all_sentence_level_stroke_length = []
+         all_sentence_level_term = []
+         all_sentence_level_char = []
+         all_sentence_level_char_length = []
+         all_word_level_stroke_in = []
+         all_word_level_stroke_out = []
+         all_word_level_stroke_length = []
+         all_word_level_term = []
+         all_word_level_char = []
+         all_word_level_char_length = []
+         all_segment_level_stroke_in = []
+         all_segment_level_stroke_out = []
+         all_segment_level_stroke_length = []
+         all_segment_level_term = []
+         all_segment_level_char = []
+         all_segment_level_char_length = []
+
+         while len(all_sentence_level_stroke_in) < self.num_writer:
+             # pick a writer id if none was given
+             if uid < 0:
+                 if TYPE == 'TRAIN':
+                     if self.datadir == './data/NEW_writers' or self.datadir == './data/writers':
+                         uid = np.random.choice([i for i in range(150)])
+                     else:
+                         if self.device == 'cpu':
+                             uid = np.random.choice([i for i in range(20)])
+                         else:
+                             uid = np.random.choice([i for i in range(294)])
+                 else:
+                     uid = np.random.choice([i for i in range(150, 170)])
+
+             total_texts = len(os.listdir(self.datadir + '/' + str(uid)))
+             if len(tids) == 0:
+                 tids = random.sample([i for i in range(total_texts)], self.num_samples)
+
+             user_sentence_level_stroke_in = []
+             user_sentence_level_stroke_out = []
+             user_sentence_level_stroke_length = []
+             user_sentence_level_term = []
+             user_sentence_level_char = []
+             user_sentence_level_char_length = []
+             user_word_level_stroke_in = []
+             user_word_level_stroke_out = []
+             user_word_level_stroke_length = []
+             user_word_level_term = []
+             user_word_level_char = []
+             user_word_level_char_length = []
+             user_segment_level_stroke_in = []
+             user_segment_level_stroke_out = []
+             user_segment_level_stroke_length = []
+             user_segment_level_term = []
+             user_segment_level_char = []
+             user_segment_level_char_length = []
+
+             # print("uid: ", uid, "\ttids:", tids)
+             for tid in tids:
+                 # the .npy layouts differ slightly between datasets
+                 if self.datadir == './data/NEW_writers':
+                     [sentence_level_raw_stroke, sentence_level_stroke_in, sentence_level_stroke_out, sentence_level_term, sentence_level_char, word_level_raw_stroke, word_level_stroke_in, word_level_stroke_out, word_level_term, word_level_char, segment_level_raw_stroke, segment_level_stroke_in, segment_level_stroke_out, segment_level_term, segment_level_char] = \
+                         np.load(self.datadir + '/' + str(uid) + '/' + str(tid) + '.npy', allow_pickle=True, encoding='bytes')
+                 elif self.datadir == './data/DW_writers' or self.datadir == './data/VALID_DW_writers':
+                     [sentence_level_raw_stroke, sentence_level_char, sentence_level_term, sentence_level_stroke_in, sentence_level_stroke_out,
+                      word_level_raw_stroke, word_level_char, word_level_term, word_level_stroke_in, word_level_stroke_out,
+                      segment_level_raw_stroke, segment_level_char, segment_level_term, segment_level_stroke_in, segment_level_stroke_out, _] = \
+                         np.load(self.datadir + '/' + str(uid) + '/' + str(tid) + '.npy', allow_pickle=True, encoding='bytes')
+                 else:
+                     [sentence_level_raw_stroke, sentence_level_stroke_in, sentence_level_stroke_out, sentence_level_term, sentence_level_char, word_level_raw_stroke, word_level_stroke_in, word_level_stroke_out, word_level_term, word_level_char, segment_level_raw_stroke, segment_level_stroke_in, segment_level_stroke_out, segment_level_term, segment_level_char, _] = \
+                         np.load(self.datadir + '/' + str(uid) + '/' + str(tid) + '.npy', allow_pickle=True, encoding='bytes')
+
+                 if self.datadir == './data/DW_writers' or self.datadir == './data/VALID_DW_writers':
+                     # these datasets carry an extra leading token
+                     sentence_level_char = sentence_level_char[1:]
+                     sentence_level_term = sentence_level_term[1:]
+
+                 # trim trailing strokes until the sequence ends on a terminal flag
+                 while len(sentence_level_term) > 0 and sentence_level_term[-1] != 1.0:
+                     sentence_level_raw_stroke = sentence_level_raw_stroke[:-1]
+                     sentence_level_char = sentence_level_char[:-1]
+                     sentence_level_term = sentence_level_term[:-1]
+                     sentence_level_stroke_in = sentence_level_stroke_in[:-1]
+                     sentence_level_stroke_out = sentence_level_stroke_out[:-1]
+
+                 tmp = []
+                 for i, t in enumerate(sentence_level_term):
+                     if t == 1:
+                         tmp.append(sentence_level_char[i])
+
+                 # scale the (dx, dy) channels down by the divider
+                 a = np.ones_like(sentence_level_stroke_in)
+                 a[:, :2] /= self.divider
+
+                 if len(sentence_level_stroke_in) == len(sentence_level_term) and len(tmp) > 0 and len(sentence_level_stroke_in) > 0:
+                     user_sentence_level_stroke_in.append(np.asarray(sentence_level_stroke_in) * a)
+                     user_sentence_level_stroke_out.append(np.asarray(sentence_level_stroke_out) * a)
+                     user_sentence_level_stroke_length.append(len(sentence_level_stroke_in))
+                     user_sentence_level_char.append(np.asarray(tmp))
+                     user_sentence_level_term.append(np.asarray(sentence_level_term))
+                     user_sentence_level_char_length.append(len(tmp))
+
+                 for wid in range(len(word_level_stroke_in)):
+                     each_word_level_stroke_in = word_level_stroke_in[wid]
+                     each_word_level_stroke_out = word_level_stroke_out[wid]
+
+                     if self.datadir == './data/DW_writers' or self.datadir == './data/VALID_DW_writers':
+                         each_word_level_term = word_level_term[wid][1:]
+                         each_word_level_char = word_level_char[wid][1:]
+                     else:
+                         each_word_level_term = word_level_term[wid]
+                         each_word_level_char = word_level_char[wid]
+
+                     # assert (len(each_word_level_stroke_in) == len(each_word_level_char) == len(each_word_level_term))
+                     while len(each_word_level_term) > 0 and each_word_level_term[-1] != 1.0:
+                         # each_word_level_raw_stroke = each_word_level_raw_stroke[:-1]
+                         each_word_level_char = each_word_level_char[:-1]
+                         each_word_level_term = each_word_level_term[:-1]
+                         each_word_level_stroke_in = each_word_level_stroke_in[:-1]
+                         each_word_level_stroke_out = each_word_level_stroke_out[:-1]
+
+                     tmp = []
+                     for i, t in enumerate(each_word_level_term):
+                         if t == 1:
+                             tmp.append(each_word_level_char[i])
+
+                     b = np.ones_like(each_word_level_stroke_in)
+                     b[:, :2] /= self.divider
+
+                     if len(each_word_level_stroke_in) == len(each_word_level_term) and len(tmp) > 0 and len(each_word_level_stroke_in) > 0:
+                         user_word_level_stroke_in.append(np.asarray(each_word_level_stroke_in) * b)
+                         user_word_level_stroke_out.append(np.asarray(each_word_level_stroke_out) * b)
+                         user_word_level_stroke_length.append(len(each_word_level_stroke_in))
+                         user_word_level_char.append(np.asarray(tmp))
+                         user_word_level_term.append(np.asarray(each_word_level_term))
+                         user_word_level_char_length.append(len(tmp))
+
+                     segment_level_stroke_in_list = []
+                     segment_level_stroke_out_list = []
+                     segment_level_stroke_length_list = []
+                     segment_level_char_list = []
+                     segment_level_term_list = []
+                     segment_level_char_length_list = []
+
+                     for sid in range(len(segment_level_stroke_in[wid])):
+                         each_segment_level_stroke_in = segment_level_stroke_in[wid][sid]
+                         each_segment_level_stroke_out = segment_level_stroke_out[wid][sid]
+
+                         if self.datadir == './data/DW_writers' or self.datadir == './data/VALID_DW_writers':
+                             each_segment_level_term = segment_level_term[wid][sid][1:]
+                             each_segment_level_char = segment_level_char[wid][sid][1:]
+                         else:
+                             each_segment_level_term = segment_level_term[wid][sid]
+                             each_segment_level_char = segment_level_char[wid][sid]
+
+                         while len(each_segment_level_term) > 0 and each_segment_level_term[-1] != 1.0:
+                             # each_segment_level_raw_stroke = each_segment_level_raw_stroke[:-1]
+                             each_segment_level_char = each_segment_level_char[:-1]
+                             each_segment_level_term = each_segment_level_term[:-1]
+                             each_segment_level_stroke_in = each_segment_level_stroke_in[:-1]
+                             each_segment_level_stroke_out = each_segment_level_stroke_out[:-1]
+
+                         tmp = []
+                         for i, t in enumerate(each_segment_level_term):
+                             if t == 1:
+                                 tmp.append(each_segment_level_char[i])
+
+                         c = np.ones_like(each_segment_level_stroke_in)
+                         c[:, :2] /= self.divider
+
+                         if len(each_segment_level_stroke_in) == len(each_segment_level_term) and len(tmp) > 0 and len(each_segment_level_stroke_in) > 0:
+                             segment_level_stroke_in_list.append(np.asarray(each_segment_level_stroke_in) * c)
+                             segment_level_stroke_out_list.append(np.asarray(each_segment_level_stroke_out) * c)
+                             segment_level_stroke_length_list.append(len(each_segment_level_stroke_in))
+                             segment_level_char_list.append(np.asarray(tmp))
+                             segment_level_term_list.append(np.asarray(each_segment_level_term))
+                             segment_level_char_length_list.append(len(tmp))
+
+                     if len(segment_level_stroke_length_list) > 0:
+                         # pad every segment in this word to the longest segment
+                         SEGMENT_MAX_STROKE_LENGTH = np.max(segment_level_stroke_length_list)
+                         SEGMENT_MAX_CHARACTER_LENGTH = np.max(segment_level_char_length_list)
+
+                         new_segment_level_stroke_in_list = np.asarray([np.pad(a, ((0, SEGMENT_MAX_STROKE_LENGTH - len(a)), (0, 0)), 'constant') for a in segment_level_stroke_in_list])
+                         new_segment_level_stroke_out_list = np.asarray([np.pad(a, ((0, SEGMENT_MAX_STROKE_LENGTH - len(a)), (0, 0)), 'constant') for a in segment_level_stroke_out_list])
+                         new_segment_level_term_list = np.asarray([np.pad(a, (0, SEGMENT_MAX_STROKE_LENGTH - len(a)), 'constant') for a in segment_level_term_list])
+                         new_segment_level_char_list = np.asarray([np.pad(a, (0, SEGMENT_MAX_CHARACTER_LENGTH - len(a)), 'constant') for a in segment_level_char_list])
+
+                         user_segment_level_stroke_in.append(new_segment_level_stroke_in_list)
+                         user_segment_level_stroke_out.append(new_segment_level_stroke_out_list)
+                         user_segment_level_stroke_length.append(segment_level_stroke_length_list)
+                         user_segment_level_char.append(new_segment_level_char_list)
+                         user_segment_level_term.append(new_segment_level_term_list)
+                         user_segment_level_char_length.append(segment_level_char_length_list)
+
+             # pad word- and sentence-level sequences across the whole batch
+             WORD_MAX_STROKE_LENGTH = np.max(user_word_level_stroke_length)
+             WORD_MAX_CHARACTER_LENGTH = np.max(user_word_level_char_length)
+             SENTENCE_MAX_STROKE_LENGTH = np.max(user_sentence_level_stroke_length)
+             SENTENCE_MAX_CHARACTER_LENGTH = np.max(user_sentence_level_char_length)
+
+             new_sentence_level_stroke_in = np.asarray([np.pad(a, ((0, SENTENCE_MAX_STROKE_LENGTH - len(a)), (0, 0)), 'constant') for a in user_sentence_level_stroke_in])
+             new_sentence_level_stroke_out = np.asarray([np.pad(a, ((0, SENTENCE_MAX_STROKE_LENGTH - len(a)), (0, 0)), 'constant') for a in user_sentence_level_stroke_out])
+             new_sentence_level_term = np.asarray([np.pad(a, (0, SENTENCE_MAX_STROKE_LENGTH - len(a)), 'constant') for a in user_sentence_level_term])
+             new_sentence_level_char = np.asarray([np.pad(a, (0, SENTENCE_MAX_CHARACTER_LENGTH - len(a)), 'constant') for a in user_sentence_level_char])
+             new_word_level_stroke_in = np.asarray([np.pad(a, ((0, WORD_MAX_STROKE_LENGTH - len(a)), (0, 0)), 'constant') for a in user_word_level_stroke_in])
+             new_word_level_stroke_out = np.asarray([np.pad(a, ((0, WORD_MAX_STROKE_LENGTH - len(a)), (0, 0)), 'constant') for a in user_word_level_stroke_out])
+             new_word_level_term = np.asarray([np.pad(a, (0, WORD_MAX_STROKE_LENGTH - len(a)), 'constant') for a in user_word_level_term])
+             new_word_level_char = np.asarray([np.pad(a, (0, WORD_MAX_CHARACTER_LENGTH - len(a)), 'constant') for a in user_word_level_char])
+
+             all_sentence_level_stroke_in.append(new_sentence_level_stroke_in)
+             all_sentence_level_stroke_out.append(new_sentence_level_stroke_out)
+             all_sentence_level_stroke_length.append(user_sentence_level_stroke_length)
+             all_sentence_level_term.append(new_sentence_level_term)
+             all_sentence_level_char.append(new_sentence_level_char)
+             all_sentence_level_char_length.append(user_sentence_level_char_length)
+             all_word_level_stroke_in.append(new_word_level_stroke_in)
+             all_word_level_stroke_out.append(new_word_level_stroke_out)
+             all_word_level_stroke_length.append(user_word_level_stroke_length)
+             all_word_level_term.append(new_word_level_term)
+             all_word_level_char.append(new_word_level_char)
+             all_word_level_char_length.append(user_word_level_char_length)
+             all_segment_level_stroke_in.append(user_segment_level_stroke_in)
+             all_segment_level_stroke_out.append(user_segment_level_stroke_out)
+             all_segment_level_stroke_length.append(user_segment_level_stroke_length)
+             all_segment_level_term.append(user_segment_level_term)
+             all_segment_level_char.append(user_segment_level_char)
+             all_segment_level_char_length.append(user_segment_level_char_length)
+
+         return [all_sentence_level_stroke_in, all_sentence_level_stroke_out, all_sentence_level_stroke_length, all_sentence_level_term, all_sentence_level_char, all_sentence_level_char_length, all_word_level_stroke_in, all_word_level_stroke_out, all_word_level_stroke_length, all_word_level_term, all_word_level_char, all_word_level_char_length, all_segment_level_stroke_in, all_segment_level_stroke_out, all_segment_level_stroke_length, all_segment_level_term, all_segment_level_char, all_segment_level_char_length]
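
For orientation, a minimal sketch of how this loader is driven; the call below mirrors the one app.py makes further down in this commit, and the 18-way unpacking follows the return statement above (the shorthand names are illustrative, not part of the commit):

    # Hypothetical usage of DataLoader.next_batch; mirrors app.py in this commit.
    from DataLoader import DataLoader

    dl = DataLoader(num_writer=1, num_samples=10, divider=5.0, datadir='./data/writers')
    batch = dl.next_batch(TYPE='TRAIN', uid=120, tids=list(range(10)))

    # next_batch returns 18 parallel lists, one entry per loaded writer:
    # six sentence-level, six word-level, and six segment-level arrays/lengths.
    (sent_in, sent_out, sent_len, sent_term, sent_char, sent_char_len,
     word_in, word_out, word_len, word_term, word_char, word_char_len,
     seg_in, seg_out, seg_len, seg_term, seg_char, seg_char_len) = batch
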
SynthesisNetwork.py ADDED
The diff for this file is too large to render. See raw diff
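
Although the diff itself is not rendered, the rest of this commit pins down the surface SynthesisNetwork must expose; a hedged sketch inferred purely from the call sites in app.py and convenience.py (nothing here is taken from the file itself):

    # Inferred (from call sites in this commit) surface of SynthesisNetwork.
    import torch
    from SynthesisNetwork import SynthesisNetwork

    net = SynthesisNetwork(weight_dim=256, num_layers=3).to('cpu')  # as constructed in app.py
    net.scale_sd = 0.5   # randomness scale, set before MDN sampling in convenience.py
    net.clamp_mdn = 1.0  # clamp on the MDN randomness, likewise set in convenience.py
    # convenience.py also calls: net.inf_state_fc1 / inf_state_relu / inf_state_lstm
    # (stroke encoder), net.char_vec_fc_1 / char_vec_relu_1 / char_lstm_1 / char_vec_fc2_1
    # (character-matrix head), net.magic_lstm, and
    # net.sample_from_w_fix(W_c_stack) -> (commands, res).
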
 
__init__.py ADDED
File without changes
app.py ADDED
@@ -0,0 +1,187 @@
+ import torch
+ import argparse
+ import numpy as np
+ from helper import *
+ from config.GlobalVariables import *
+ from SynthesisNetwork import SynthesisNetwork
+ from DataLoader import DataLoader
+ import convenience
+ import gradio as gr
+
+ #@title Demo
+ device = 'cpu'
+ num_samples = 10
+
+ net = SynthesisNetwork(weight_dim=256, num_layers=3).to(device)
+
+ if not torch.cuda.is_available():
+     try:  # the retrained model checkpoint also stores the loss in its dict
+         net.load_state_dict(torch.load('./model/250000.pt', map_location=torch.device(device))["model_state_dict"])
+     except Exception:
+         net.load_state_dict(torch.load('./model/250000.pt', map_location=torch.device(device)))
+
+ dl = DataLoader(num_writer=1, num_samples=10, divider=5.0, datadir='./data/writers')
+
+ writer_options = [5, 14, 15, 16, 17, 22, 25, 80, 120, 137, 147, 151]
+ all_loaded_data = []
+ avail_char = "0 1 2 3 4 5 6 7 8 9 a b c d e f g h i j k l m n o p q r s t u v w x y z A B C D E F G H I J K L M N O P Q R S T U V W X Y Z ! ? \" ' * + - = : ; , . < > \\ / [ ] ( ) # $ % &"
+ avail_char_list = avail_char.split(" ")
+ for writer_id in [120, 80]:
+     loaded_data = dl.next_batch(TYPE='TRAIN', uid=writer_id, tids=list(range(num_samples)))
+     all_loaded_data.append(loaded_data)
+
+ default_loaded_data = all_loaded_data[-1]
+
+
+ # for writer interpolation
+ def interpolate_writers(target_word, weight):
+     image = convenience.sample_blended_writers([1 - weight, weight], target_word, net, all_loaded_data, device).convert("RGB")
+     return image
+
+
+ def choose_blend_writers(writer1, writer2):
+     id1, id2 = int(writer1.split(" ")[1]), int(writer2.split(" ")[1])  # fixed: the second id must come from writer2
+     all_loaded_data.clear()
+     for writer_id in [id1, id2]:
+         loaded_data = dl.next_batch(TYPE='TRAIN', uid=writer_id, tids=list(range(num_samples)))
+         all_loaded_data.append(loaded_data)
+     return gr.Slider.update(label=f"{writer1} vs. {writer2}")
+
+
+ '''
+ def choose_writer(writ, c1, c2, c3, c4, val):
+     all_loaded_data.clear()
+     w = int(writ.split(" ")[1])
+     loaded_data = dl.next_batch(TYPE='TRAIN', uid=w, tids=list(range(num_samples)))
+     all_loaded_data.append(loaded_data)
+     return char_grid(c1, c2, c3, c4, val)
+ '''
+
+
+ # for the character grid
+ def choose_grid_chars(c1, c2, c3, c4):
+     return gr.Button.update(value=f"Blend {c1}, {c2}, {c3}, and {c4}!")
+
+
+ def char_grid(c1, c2, c3, c4):
+     image = convenience.sample_character_grid([c1, c2, c3, c4], 5, net, [default_loaded_data], device).convert("RGB")
+     return image
+
+
+ # for character blending
+ def interpolate_chars(c1, c2, weight):
+     image = convenience.sample_blended_chars([1 - weight, weight], [c1, c2], net, [default_loaded_data], device).convert("RGB")
+     return image
+
+
+ def choose_blend_chars(c1, c2):
+     return gr.Slider.update(label=f"'{c1}' vs. '{c2}'")
+
+
+ # for MDN sampling
+ def mdn_sample(word, maxs, maxr):
+     image = convenience.mdn_single_sample(word, maxs, maxr, net, [default_loaded_data], device).convert("RGB")
+     return image
+
+
+ """
+ def char_vid(word):
+     # make word list
+     convenience.char_interpolation_video(word_list, 10, net, [default_loaded_data], device).convert('RGB')
+     vid_path = f"/content/drive/MyDrive/Colab Notebooks/Spring22/decoupled-style-descriptors-eb/results/abcdefg_video.mov"
+     return gr.Video.update(value=vid_path)
+ """
+
+ with gr.Blocks() as demo:
+     with gr.Tabs():
+         with gr.TabItem("Blend Writers"):
+             target_word = gr.Textbox(label="Target Word", value="hello world", max_lines=1)
+             with gr.Row():
+                 left_ratio_options = ["Style " + str(id) for i, id in enumerate(writer_options) if i % 2 == 0]
+                 right_ratio_options = ["Style " + str(id) for i, id in enumerate(writer_options) if i % 2 == 1]
+                 with gr.Column():
+                     writer1 = gr.Radio(left_ratio_options, value="Style 120", label="Style for first writer")
+                 with gr.Column():
+                     writer2 = gr.Radio(right_ratio_options, value="Style 80", label="Style for second writer")
+             with gr.Row():
+                 writer_slider = gr.Slider(0, 1, value=0.3, label="Style 120 vs. Style 80")
+             with gr.Row():
+                 writer_submit = gr.Button("Submit")
+             with gr.Row():
+                 writer_default_image = convenience.sample_blended_writers([0.7, 0.3], "hello world", net, all_loaded_data, device).convert("RGB")
+                 writer_output = gr.Image(writer_default_image)
+
+             writer_submit.click(fn=interpolate_writers, inputs=[target_word, writer_slider], outputs=[writer_output])
+             writer_slider.change(fn=interpolate_writers, inputs=[target_word, writer_slider], outputs=[writer_output])
+             target_word.submit(fn=interpolate_writers, inputs=[target_word, writer_slider], outputs=[writer_output])
+
+             writer1.change(fn=choose_blend_writers, inputs=[writer1, writer2], outputs=[writer_slider])
+             writer2.change(fn=choose_blend_writers, inputs=[writer1, writer2], outputs=[writer_slider])
+
+         with gr.TabItem("Blend Characters"):
+             with gr.Row():
+                 with gr.Column():
+                     char1 = gr.Dropdown(choices=avail_char_list, value="y", label="Character 1")
+                 with gr.Column():
+                     char2 = gr.Dropdown(choices=avail_char_list, value="s", label="Character 2")
+             with gr.Row():
+                 char_slider = gr.Slider(0, 1, value=0.3, label="'y' vs. 's'")
+             with gr.Row():
+                 char_default_image = convenience.sample_blended_chars([0.7, 0.3], ["y", "s"], net, [default_loaded_data], device).convert("RGB")
+                 char_output = gr.Image(char_default_image)
+
+             char_slider.change(fn=interpolate_chars, inputs=[char1, char2, char_slider], outputs=[char_output])
+
+             char1.change(fn=choose_blend_chars, inputs=[char1, char2], outputs=[char_slider])
+             char2.change(fn=choose_blend_chars, inputs=[char1, char2], outputs=[char_slider])
+
+         """
+         with gr.TabItem("Character Grid"):  # slow
+             with gr.Row():
+                 with gr.Column():
+                     char1 = gr.Dropdown(choices=avail_char_list, value="y", label="Character 1")
+                 with gr.Column():
+                     char2 = gr.Dropdown(choices=avail_char_list, value="s", label="Character 2")
+                 with gr.Column():
+                     char3 = gr.Dropdown(choices=avail_char_list, value="u", label="Character 3")
+                 with gr.Column():
+                     char4 = gr.Dropdown(choices=avail_char_list, value="n", label="Character 4")
+             with gr.Row():
+                 submit_button = gr.Button(value="Blend y, s, u, and n!")
+             '''
+             with gr.Row():
+                 radio_options2 = ["Writer " + str(n) for n in writer_options]
+                 writer = gr.Radio(radio_options2, value="Writer 80", label="Style for Writer")
+                 writer.change(fn=choose_writer, inputs=[writer, char1, char2, char3, char4, slider2], outputs=[output])
+             '''
+             # slider2 = gr.Slider(2, 20, value=10, label="Grid Size", step=1)
+
+             default_image = convenience.sample_character_grid(['y', 's', 'u', 'n'], 10, net, [default_loaded_data], device).convert("RGB")
+             output = gr.Image(default_image)
+
+             char1.change(fn=choose_grid_chars, inputs=[char1, char2, char3, char4], outputs=[submit_button])
+             char2.change(fn=choose_grid_chars, inputs=[char1, char2, char3, char4], outputs=[submit_button])
+             char3.change(fn=choose_grid_chars, inputs=[char1, char2, char3, char4], outputs=[submit_button])
+             char4.change(fn=choose_grid_chars, inputs=[char1, char2, char3, char4], outputs=[submit_button])
+
+             # slider2.change(fn=char_grid, inputs=[char1, char2, char3, char4, slider2], outputs=[output])
+             submit_button.click(fn=char_grid, inputs=[char1, char2, char3, char4], outputs=[output])
+         """
+
+         with gr.TabItem("Add Randomness"):
+             mdn_word = gr.Textbox(label="Target Word", value="hello world", max_lines=1)
+             '''
+             with gr.Row():
+                 radio_options3 = ["Writer " + str(n) for n in writer_options]
+                 writer = gr.Radio(radio_options3, value="Writer 80", label="Style for Writer")
+                 writer.change(fn=new_writer_mdn, inputs=[writer, slider3, slider4], outputs=[output])
+             '''
+             with gr.Row():
+                 with gr.Column():
+                     max_rand = gr.Slider(0, 1, value=1, label="Maximum Randomness")
+                 with gr.Column():
+                     scale_rand = gr.Slider(0, 3, value=0.5, label="Scale of Randomness")
+             with gr.Row():
+                 sample_button = gr.Button(value="Resample!")
+             with gr.Row():
+                 default_im = convenience.mdn_single_sample("hello world", 0.5, 1, net, [default_loaded_data], device).convert('RGB')
+                 mdn_output = gr.Image(default_im)
+
+             max_rand.change(fn=mdn_sample, inputs=[mdn_word, scale_rand, max_rand], outputs=[mdn_output])
+             scale_rand.change(fn=mdn_sample, inputs=[mdn_word, scale_rand, max_rand], outputs=[mdn_output])
+             sample_button.click(fn=mdn_sample, inputs=[mdn_word, scale_rand, max_rand], outputs=[mdn_output])
+             mdn_word.submit(fn=mdn_sample, inputs=[mdn_word, scale_rand, max_rand], outputs=[mdn_output])
+
+ demo.launch()
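
All of the Gradio callbacks above funnel into convenience.py; a headless sketch of the "Blend Writers" path under the same setup (the weights and word are arbitrary examples, and the output path is hypothetical):

    # Assumed headless equivalent of the "Blend Writers" tab;
    # matches the writer_default_image call above.
    image = convenience.sample_blended_writers(
        [0.7, 0.3],        # weights for the two loaded writers; should sum to 1
        "hello world",     # target sentence
        net, all_loaded_data, device,
    ).convert("RGB")
    image.save("blended.png")  # hypothetical output path
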
config/GlobalVariables.py ADDED
@@ -0,0 +1,5 @@
+ COLORS = [(255,255,255), (255,0,0), (0,255,0), (0,0,255), (255,255,0), (0,255,255), (255,0,255), (255,128,0), (0,255,128), (128,0,255), (255,0,128), (128,255,0), (0,128,255)]
+ CHARACTERS = ' !"#$%&\'()*+,-./0123456789:;<=>?ABCDEFGHIJKLMNOPQRSTUVWXYZ[]abcdefghijklmnopqrstuvwxyz'
+ # CHARACTERS = ' !"&\'(),-.:;?ABCDEFGHIJKLMNOPQRSTUVWXYZ[]abcdefghijklmnopqrstuvwxyz'
+
+ ''.join([CHARACTERS[i] for i in [4, 2, 30]])  # stray decoding example left in the commit; has no effect at import time
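
CHARACTERS doubles as the integer-to-character codebook for the .npy annotations: DataLoader stores character ids, and convenience.py decodes them with expressions like the trailing one above. A quick sketch of the round trip (the word is an arbitrary example):

    # Assumed round trip between characters and the integer ids used in the data files.
    from config.GlobalVariables import CHARACTERS

    ids = [CHARACTERS.index(c) for c in "hello"]  # encode, as get_DSD does per character
    word = ''.join(CHARACTERS[i] for i in ids)    # decode, as get_DSD does per segment
    assert word == "hello"
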
config/__init__.py ADDED
File without changes
config/__pycache__/GlobalVariables.cpython-38.pyc ADDED
Binary file (771 Bytes).
 
config/__pycache__/GlobalVariables.cpython-39.pyc ADDED
Binary file (747 Bytes).
 
config/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (239 Bytes).
 
config/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (239 Bytes).
 
convenience.py ADDED
@@ -0,0 +1,555 @@
+ import os
+ import re
+ from random import random
+ import torch
+ import pickle
+ import argparse
+ import numpy as np
+ from helper import *
+ from PIL import Image, ImageDraw  # ImageDraw is used by the drawing helpers below
+ import torch.nn as nn
+ import torch.optim as optim
+ from config.GlobalVariables import *
+ from tensorboardX import SummaryWriter
+ from SynthesisNetwork import SynthesisNetwork
+ from DataLoader import DataLoader
+ # import ffmpeg  # for problems with ffmpeg, uninstall ffmpeg and then install ffmpeg-python
+
+ L = 256  # latent vector size
+
+
+ def get_mean_global_W(net, loaded_data, device):
+     """Gets the mean global style vector for a given writer."""
+     [_, _, _, _, _, _, all_word_level_stroke_in, all_word_level_stroke_out, all_word_level_stroke_length, all_word_level_term, all_word_level_char, all_word_level_char_length, all_segment_level_stroke_in, all_segment_level_stroke_out,
+      all_segment_level_stroke_length, all_segment_level_term, all_segment_level_char, all_segment_level_char_length] = loaded_data
+
+     batch_word_level_stroke_in = [torch.FloatTensor(a).to(device) for a in all_word_level_stroke_in]
+     batch_word_level_stroke_out = [torch.FloatTensor(a).to(device) for a in all_word_level_stroke_out]
+     batch_word_level_stroke_length = [torch.LongTensor(a).to(device).unsqueeze(-1) for a in all_word_level_stroke_length]
+     batch_word_level_term = [torch.FloatTensor(a).to(device) for a in all_word_level_term]
+     batch_word_level_char = [torch.LongTensor(a).to(device) for a in all_word_level_char]
+     batch_word_level_char_length = [torch.LongTensor(a).to(device).unsqueeze(-1) for a in all_word_level_char_length]
+     batch_segment_level_stroke_in = [[torch.FloatTensor(a).to(device) for a in b] for b in all_segment_level_stroke_in]
+     batch_segment_level_stroke_out = [[torch.FloatTensor(a).to(device) for a in b] for b in all_segment_level_stroke_out]
+     batch_segment_level_stroke_length = [[torch.LongTensor(a).to(device).unsqueeze(-1) for a in b] for b in all_segment_level_stroke_length]
+     batch_segment_level_term = [[torch.FloatTensor(a).to(device) for a in b] for b in all_segment_level_term]
+     batch_segment_level_char = [[torch.LongTensor(a).to(device) for a in b] for b in all_segment_level_char]
+     batch_segment_level_char_length = [[torch.LongTensor(a).to(device).unsqueeze(-1) for a in b] for b in all_segment_level_char_length]
+
+     with torch.no_grad():
+         word_inf_state_out = net.inf_state_fc1(batch_word_level_stroke_out[0])
+         word_inf_state_out = net.inf_state_relu(word_inf_state_out)
+         word_inf_state_out, _ = net.inf_state_lstm(word_inf_state_out)
+
+         user_word_level_char = batch_word_level_char[0]
+         user_word_level_term = batch_word_level_term[0]
+
+         original_Wc = []
+         word_batch_id = 0
+
+         curr_seq_len = batch_word_level_stroke_length[0][word_batch_id][0]
+         curr_char_len = batch_word_level_char_length[0][word_batch_id][0]
+
+         char_vector = torch.eye(len(CHARACTERS))[user_word_level_char[word_batch_id][:curr_char_len]].to(device)
+         current_term = user_word_level_term[word_batch_id][:curr_seq_len].unsqueeze(-1)
+         split_ids = torch.nonzero(current_term)[:, 0]
+
+         char_vector_1 = net.char_vec_fc_1(char_vector)
+         char_vector_1 = net.char_vec_relu_1(char_vector_1)
+
+         char_out_1 = char_vector_1.unsqueeze(0)
+         char_out_1, (c, h) = net.char_lstm_1(char_out_1)
+         char_out_1 = char_out_1.squeeze(0)
+         char_out_1 = net.char_vec_fc2_1(char_out_1)
+         char_matrix_1 = char_out_1.view([-1, 1, 256, 256])
+         char_matrix_1 = char_matrix_1.squeeze(1)
+         char_matrix_inv_1 = torch.inverse(char_matrix_1)
+
+         W_c_t = word_inf_state_out[word_batch_id][:curr_seq_len]
+         W_c = torch.stack([W_c_t[i] for i in split_ids])
+         original_Wc.append(W_c)
+
+         # invert the character matrices to recover writer-dependent, character-independent W's
+         W = torch.bmm(char_matrix_inv_1, W_c.unsqueeze(2)).squeeze(-1)
+         mean_global_W = torch.mean(W, 0)
+     return mean_global_W
+
+
+ def get_DSD(net, target_word, writer_mean_Ws, all_loaded_data, device):
+     """
+     Returns a style vector and character matrix for each character/segment in target_word.
+
+     n is the number of writers
+     M is the number of characters in the target word
+     L is the latent vector size (in this case 256)
+
+     input:
+         - target_word, a string of length M to be converted to a DSD
+         - writer_mean_Ws, a list of n style vectors of size L
+
+     output:
+         - all_writer_Ws, a tensor of size n x M x L representing the style vectors for each writer and character
+         - all_writer_Cs, a tensor of size n x M x L x L representing the corresponding character matrices
+     """
+     n = len(all_loaded_data)
+     M = len(target_word)
+     all_writer_Ws = torch.zeros(n, M, L)
+     all_writer_Cs = torch.zeros(n, M, L, L)
+
+     for i in range(n):
+         np.random.seed(0)
+
+         [_, _, _, _, _, _, all_word_level_stroke_in, all_word_level_stroke_out, all_word_level_stroke_length, all_word_level_term, all_word_level_char, all_word_level_char_length, all_segment_level_stroke_in, all_segment_level_stroke_out,
+          all_segment_level_stroke_length, all_segment_level_term, all_segment_level_char, all_segment_level_char_length] = all_loaded_data[i]
+
+         available_segments = {}
+         for sid, sentence in enumerate(all_segment_level_char[0]):
+             for wid, word in enumerate(sentence):
+                 segment = ''.join([CHARACTERS[i] for i in word])
+                 split_ids = np.asarray(np.nonzero(all_segment_level_term[0][sid][wid]))
+
+                 if segment in available_segments:
+                     available_segments[segment].append([all_segment_level_stroke_out[0][sid][wid][:all_segment_level_stroke_length[0][sid][wid]], split_ids])
+                 else:
+                     available_segments[segment] = [[all_segment_level_stroke_out[0][sid][wid][:all_segment_level_stroke_length[0][sid][wid]], split_ids]]
+
+         index = 0
+         all_W = []
+         all_C = []
+
+         # while index <= len(target_word):
+         while index < len(target_word):
+             available = False
+             # Currently this just uses each character individually instead of the whole segment
+             # for end_index in range(len(target_word), index, -1):
+             #     segment = target_word[index:end_index]
+             segment = target_word[index]
+             if segment in available_segments:  # method beta
+                 # print(f'in dic - {segment}')
+                 available = True
+                 candidates = available_segments[segment]
+                 segment_level_stroke_out, split_ids = candidates[np.random.randint(len(candidates))]
+                 out = net.inf_state_fc1(torch.FloatTensor(segment_level_stroke_out).to(device).unsqueeze(0))
+                 out = net.inf_state_relu(out)
+                 seg_W_c, (h_n, _) = net.inf_state_lstm(out)
+
+                 character = segment[0]  # take the first character of the segment
+
+                 # get the character matrix, using the same method as method beta
+                 char_vector = torch.eye(len(CHARACTERS))[CHARACTERS.index(character)].to(device).unsqueeze(0)
+                 out = net.char_vec_fc_1(char_vector)
+                 out = net.char_vec_relu_1(out)
+                 out, _ = net.char_lstm_1(out.unsqueeze(0))
+                 out = out.squeeze(0)
+                 out = net.char_vec_fc2_1(out)
+                 char_matrix = out.view([-1, 256, 256])
+                 inv_char_matrix = char_matrix.inverse()
+
+                 id = split_ids[0][0]
+                 W_c_vector = seg_W_c[0, id].squeeze()
+
+                 # invert to get a writer-independent DSD
+                 W_vector = torch.bmm(inv_char_matrix, W_c_vector.repeat(inv_char_matrix.size(0), 1).unsqueeze(2))
+                 all_W.append(W_vector)
+                 all_C.append(char_matrix)
+
+                 index += 1
+
+                 if index == len(target_word):
+                     break
+
+             if not available:  # method alpha
+                 character = target_word[index]
+                 # print(f'no dic - {character}')
+                 char_vector = torch.eye(len(CHARACTERS))[CHARACTERS.index(character)].to(device).unsqueeze(0)
+                 out = net.char_vec_fc_1(char_vector)
+                 out = net.char_vec_relu_1(out)
+                 out, _ = net.char_lstm_1(out.unsqueeze(0))
+                 out = out.squeeze(0)
+                 out = net.char_vec_fc2_1(out)
+                 char_matrix = out.view([-1, 256, 256])
+
+                 W_vector = writer_mean_Ws[i].repeat(char_matrix.size(0), 1).unsqueeze(2)
+
+                 # all_W.append([W_vector])
+                 all_W.append(W_vector)
+                 all_C.append(char_matrix)
+
+                 index += 1
+
+         all_writer_Ws[i, :, :] = torch.stack(all_W).squeeze()
+         all_writer_Cs[i, :, :, :] = torch.stack(all_C).squeeze()
+
+     return all_writer_Ws, all_writer_Cs
+
+
+ def get_writer_blend_W_c(writer_weights, all_Ws, all_Cs):
+     """
+     Generates character-dependent, style-dependent DSDs for each character/segment in the target word,
+     averaging together the writers' handwriting styles with the provided weights.
+
+     n is the number of writers
+     M is the number of characters in the target word
+     L is the latent vector size (in this case 256)
+
+     input:
+         - writer_weights, a list of n weights for the writers, summing to one
+         - all_Ws, an n x M x L tensor holding each writer's style vector for every character
+         - all_Cs, an n x M x L x L tensor holding the corresponding character matrices
+
+     output:
+         - an M x 1 x L tensor of M character-dependent, style-dependent DSDs
+     """
+     n, M, _ = all_Ws.shape
+     weights_tensor = torch.tensor(writer_weights).repeat_interleave(M * L).reshape(n, M, L)  # repeat across remaining dimensions
+     W_vectors = (weights_tensor * all_Ws).sum(axis=0).unsqueeze(-1)  # weighted sum across the writer axis
+     char_matrices = all_Cs[0, :, :, :]  # character matrices are independent of the writer
+
+     W_cs = torch.bmm(char_matrices, W_vectors)
+
+     return W_cs.reshape(M, 1, L)
+
+
+ def get_character_blend_W_c(character_weights, all_Ws, all_Cs):
+     """
+     Generates a single character-dependent, style-dependent DSD,
+     averaging together the characters with the provided weights.
+
+     M is the number of characters to blend
+     L is the latent vector size (in this case 256)
+
+     input:
+         - character_weights, a list of M weights for the characters, summing to one
+         - all_Ws, a 1 x M x L tensor holding the writer's style vector for each character
+         - all_Cs, a 1 x M x L x L tensor holding the corresponding character matrices
+
+     output:
+         - a 1 x 1 x L tensor representing the blended character-dependent, style-dependent DSD
+     """
+     M = len(character_weights)
+     W_vector = all_Ws[0, 0, :].unsqueeze(-1)
+
+     weights_tensor = torch.tensor(character_weights).repeat_interleave(L * L).reshape(1, M, L, L)  # repeat across remaining dimensions
+     char_matrix = (weights_tensor * all_Cs).sum(axis=1).squeeze()  # weighted sum across the character axis
+
+     W_c = char_matrix @ W_vector
+
+     return W_c.reshape(1, 1, L)
+
+
+ def get_commands(net, target_word, all_W_c):  # target_word is only used for its length
+     """Converts character-dependent, style-dependent DSDs into a list of (x, y, pen) drawing commands."""
+     all_commands = []
+     current_id = 0
+     while True:
+         word_Wc_rec_TYPE_D = []
+         TYPE_D_REF = []
+         cid = 0
+         for segment_batch_id in range(len(all_W_c)):
+             if len(TYPE_D_REF) == 0:
+                 for each_segment_Wc in all_W_c[segment_batch_id]:
+                     if cid >= current_id:
+                         word_Wc_rec_TYPE_D.append(each_segment_Wc)
+                     cid += 1
+                 if len(word_Wc_rec_TYPE_D) > 0:
+                     TYPE_D_REF.append(all_W_c[segment_batch_id][-1])
+             else:
+                 for each_segment_Wc in all_W_c[segment_batch_id]:
+                     magic_inp = torch.cat([torch.stack(TYPE_D_REF, 0), each_segment_Wc.unsqueeze(0)], 0)
+                     magic_inp = magic_inp.unsqueeze(0)
+                     TYPE_D_out, (c, h) = net.magic_lstm(magic_inp)
+                     TYPE_D_out = TYPE_D_out.squeeze(0)
+                     word_Wc_rec_TYPE_D.append(TYPE_D_out[-1])
+                 TYPE_D_REF.append(all_W_c[segment_batch_id][-1])
+         WC_ = torch.stack(word_Wc_rec_TYPE_D)
+         tmp_commands, res = net.sample_from_w_fix(WC_)
+         current_id += res
+         if len(all_commands) == 0:
+             all_commands.append(tmp_commands)
+         else:
+             all_commands.append(tmp_commands[1:])
+         if res < 0 or current_id >= len(target_word):
+             break
+
+     # accumulate the relative (dx, dy) offsets into absolute coordinates
+     commands = []
+     px, py = 0, 100
+     for coms in all_commands:
+         for i, [dx, dy, t] in enumerate(coms):
+             x = px + dx * 5
+             y = py + dy * 5
+             commands.append([x, y, t])
+             px, py = x, y
+     commands = np.asarray(commands)
+     commands[:, 0] -= np.min(commands[:, 0])
+
+     return commands
+
+
+ def mdn_video(target_word, num_samples, scale_sd, clamp_mdn, net, all_loaded_data, device):
+     '''
+     Creates a video out of MDN samples.
+     num_samples: the number of frames to sample
+     scale_sd / clamp_mdn: control how much randomness the MDN uses while sampling
+     '''
+     import ffmpeg  # deferred import so the module loads even without ffmpeg-python installed
+
+     words = target_word.split(' ')
+     us_target_word = re.sub(r"\s+", '_', target_word)
+     os.makedirs(f"./results/{us_target_word}_mdn_samples", exist_ok=True)
+     for i in range(num_samples):
+         im = Image.fromarray(np.zeros([160, 750]))
+         dr = ImageDraw.Draw(im)
+         width = 50
+
+         net.scale_sd = scale_sd
+         net.clamp_mdn = clamp_mdn
+
+         mean_global_W = get_mean_global_W(net, all_loaded_data[0], device)
+
+         for word in words:
+             writer_Ws, writer_Cs = get_DSD(net, word, [mean_global_W], [all_loaded_data[0]], device)
+             all_W_c = get_writer_blend_W_c([1], writer_Ws, writer_Cs)
+             all_commands = get_commands(net, word, all_W_c)
+
+             px, py = 0, 100  # guard against the first command being a pen-down
+             for [x, y, t] in all_commands:
+                 if t == 0:
+                     dr.line((px + width, py, x + width, y), 255, 1)
+                 px, py = x, y
+             width += np.max(all_commands[:, 0]) + 25
+
+         im.convert("RGB").save(f'results/{us_target_word}_mdn_samples/sample_{i}.png')
+     # Convert frames to video using ffmpeg
+     photos = ffmpeg.input(f'results/{us_target_word}_mdn_samples/sample_*.png', pattern_type='glob', framerate=10)
+     videos = photos.output(f'results/{us_target_word}_video.mov', vcodec="libx264", pix_fmt="yuv420p")
+     videos.run(overwrite_output=True)
+
+
+ def sample_blended_writers(writer_weights, target_sentence, net, all_loaded_data, device="cpu"):
+     """Generates an image of handwritten text based on target_sentence, blending the loaded writers."""
+     words = target_sentence.split(' ')
+
+     im = Image.fromarray(np.zeros([160, 750]))
+     dr = ImageDraw.Draw(im)
+     width = 50
+
+     writer_mean_Ws = []
+     for loaded_data in all_loaded_data:
+         mean_global_W = get_mean_global_W(net, loaded_data, device)
+         writer_mean_Ws.append(mean_global_W)
+
+     for word in words:
+         all_writer_Ws, all_writer_Cs = get_DSD(net, word, writer_mean_Ws, all_loaded_data, device)
+         all_W_c = get_writer_blend_W_c(writer_weights, all_writer_Ws, all_writer_Cs)
+         all_commands = get_commands(net, word, all_W_c)
+
+         px, py = 0, 100  # guard against the first command being a pen-down
+         for [x, y, t] in all_commands:
+             if t == 0:
+                 dr.line((px + width, py, x + width, y), 255, 1)
+             px, py = x, y
+         width += np.max(all_commands[:, 0]) + 25
+
+     return im
+
+
+ def sample_character_grid(letters, grid_size, net, all_loaded_data, device="cpu"):
+     """Generates a grid_size x grid_size image that bilinearly blends the four given letters at its corners."""
+     width = 60
+     im = Image.fromarray(np.zeros([(grid_size + 1) * width, (grid_size + 1) * width]))
+     dr = ImageDraw.Draw(im)
+
+     M = len(letters)
+     mean_global_W = get_mean_global_W(net, all_loaded_data[0], device)
+
+     # all_Ws = torch.zeros(1, M, L)
+     all_Cs = torch.zeros(1, M, L, L)
+     for i in range(M):  # get the corners of the grid
+         W_vector, char_matrix = get_DSD(net, letters[i], [mean_global_W], [all_loaded_data[0]], device)
+         # all_Ws[:, i, :] = W_vector
+         all_Cs[:, i, :, :] = char_matrix
+
+     all_Ws = mean_global_W.reshape(1, 1, L)
+
+     for i in range(grid_size):
+         for j in range(grid_size):
+             wx = i / (grid_size - 1)
+             wy = j / (grid_size - 1)
+
+             character_weights = [(1 - wx) * (1 - wy),  # top left is 1 at (0, 0)
+                                  wx * (1 - wy),        # top right is 1 at (1, 0)
+                                  (1 - wx) * wy,        # bottom left is 1 at (0, 1)
+                                  wx * wy]              # bottom right is 1 at (1, 1)
+             all_W_c = get_character_blend_W_c(character_weights, all_Ws, all_Cs)
+             all_commands = get_commands(net, letters[0], all_W_c)
+
+             offset_x = i * width
+             offset_y = j * width
+
+             px, py = 0, 100  # guard against the first command being a pen-down
+             for [x, y, t] in all_commands:
+                 if t == 0:
+                     dr.line((
+                         px + offset_x + width / 2,
+                         py + offset_y - width / 2,  # letters are shifted down for some reason
+                         x + offset_x + width / 2,
+                         y + offset_y - width / 2), 255, 1)
+                 px, py = x, y
+
+     return im
+
+
+ def writer_interpolation_video(target_sentence, transition_time, net, all_loaded_data, device="cpu"):
+     """Generates a video that interpolates between each pair of consecutive writers."""
+     import ffmpeg  # deferred import so the module loads even without ffmpeg-python installed
+
+     n = len(all_loaded_data)
+
+     os.makedirs(f"./results/{target_sentence}_blend_frames", exist_ok=True)
+
+     words = target_sentence.split(' ')
+
+     writer_mean_Ws = []
+     for loaded_data in all_loaded_data:
+         mean_global_W = get_mean_global_W(net, loaded_data, device)
+         writer_mean_Ws.append(mean_global_W)
+
+     word_Ws = []
+     word_Cs = []
+
+     for word in words:
+         all_writer_Ws, all_writer_Cs = get_DSD(net, word, writer_mean_Ws, all_loaded_data, device)
+         word_Ws.append(all_writer_Ws)
+         word_Cs.append(all_writer_Cs)
+
+     for i in range(n - 1):
+         for j in range(transition_time):
+             im = Image.fromarray(np.zeros([160, 750]))
+             dr = ImageDraw.Draw(im)
+             width = 50
+
+             completion = j / transition_time
+
+             individual_weights = [1 - completion, completion]
+             writer_weights = [0] * i + individual_weights + [0] * (n - 2 - i)
+
+             for k, word in enumerate(words):
+                 all_writer_Ws, all_writer_Cs = word_Ws[k], word_Cs[k]
+                 all_W_c = get_writer_blend_W_c(writer_weights, all_writer_Ws, all_writer_Cs)
+                 all_commands = get_commands(net, word, all_W_c)
+
+                 px, py = 0, 100  # guard against the first command being a pen-down
+                 for [x, y, t] in all_commands:
+                     if t == 0:
+                         dr.line((px + width, py, x + width, y), 255, 1)
+                     px, py = x, y
+                 width += np.max(all_commands[:, 0]) + 25
+
+             im.convert("RGB").save(f"./results/{target_sentence}_blend_frames/frame_{str(i * transition_time + j).zfill(3)}.png")
+
+     # Convert frames to video using ffmpeg
+     photos = ffmpeg.input(f"./results/{target_sentence}_blend_frames/frame_*.png", pattern_type='glob', framerate=10)
+     videos = photos.output(f"results/{target_sentence}_blend_video.mov", vcodec="libx264", pix_fmt="yuv420p")
+     videos.run(overwrite_output=True)
+
+
+ def mdn_single_sample(target_word, scale_sd, clamp_mdn, net, all_loaded_data, device):
+     '''
+     Draws a single MDN sample of target_word.
+     scale_sd / clamp_mdn: control how much randomness the MDN uses while sampling
+     '''
+     words = target_word.split(' ')
+     im = Image.fromarray(np.zeros([160, 750]))
+     dr = ImageDraw.Draw(im)
+     width = 50
+
+     net.scale_sd = scale_sd
+     net.clamp_mdn = clamp_mdn
+
+     mean_global_W = get_mean_global_W(net, all_loaded_data[0], device)
+
+     for word in words:
+         writer_Ws, writer_Cs = get_DSD(net, word, [mean_global_W], [all_loaded_data[0]], device)
+         all_W_c = get_writer_blend_W_c([1], writer_Ws, writer_Cs)
+         all_commands = get_commands(net, word, all_W_c)
+
+         px, py = 0, 100  # guard against the first command being a pen-down
+         for [x, y, t] in all_commands:
+             if t == 0:
+                 dr.line((px + width, py, x + width, y), 255, 1)
+             px, py = x, y
+         width += np.max(all_commands[:, 0]) + 25
+
+     return im
+
+
+ def sample_blended_chars(character_weights, letters, net, all_loaded_data, device="cpu"):
+     """Generates an image of a single glyph that blends the given letters with the given weights."""
+     width = 60
+     im = Image.fromarray(np.zeros([100, 100]))
+     dr = ImageDraw.Draw(im)
+
+     M = len(letters)
+     mean_global_W = get_mean_global_W(net, all_loaded_data[0], device)
+
+     all_Cs = torch.zeros(1, M, L, L)
+     for i in range(M):  # get a character matrix for each letter
+         W_vector, char_matrix = get_DSD(net, letters[i], [mean_global_W], [all_loaded_data[0]], device)
+         all_Cs[:, i, :, :] = char_matrix
+
+     all_Ws = mean_global_W.reshape(1, 1, L)
+
+     all_W_c = get_character_blend_W_c(character_weights, all_Ws, all_Cs)
+     all_commands = get_commands(net, letters[0], all_W_c)
+
+     px, py = 0, 100  # guard against the first command being a pen-down
+     for [x, y, t] in all_commands:
+         if t == 0:
+             dr.line((
+                 px + width / 2,
+                 py - width / 2,  # letters are shifted down for some reason
+                 x + width / 2,
+                 y - width / 2), 255, 1)
+         px, py = x, y
+
+     return im
+
+
+ def char_interpolation_video(letters, transition_time, net, all_loaded_data, device="cpu"):
+     """Generates a video that interpolates between each pair of consecutive letters."""
+     import ffmpeg  # deferred import so the module loads even without ffmpeg-python installed
+
+     os.makedirs(f"./results/{''.join(letters)}_frames", exist_ok=True)  # make a folder for the frames
+
+     width = 50
+
+     M = len(letters)
+     mean_global_W = get_mean_global_W(net, all_loaded_data[0], device)
+
+     all_Cs = torch.zeros(1, M, L, L)
+     for i in range(M):  # get a character matrix for each letter
+         W_vector, char_matrix = get_DSD(net, letters[i], [mean_global_W], [all_loaded_data[0]], device)
+         all_Cs[:, i, :, :] = char_matrix
+
+     all_Ws = mean_global_W.reshape(1, 1, L)
+
+     for i in range(M - 1):
+         for j in range(transition_time):
+             completion = j / (transition_time - 1)
+             individual_weights = [1 - completion, completion]
+             character_weights = [0] * i + individual_weights + [0] * (M - 2 - i)
+             all_W_c = get_character_blend_W_c(character_weights, all_Ws, all_Cs)
+             all_commands = get_commands(net, "change this later!", all_W_c)
+
+             im = Image.fromarray(np.zeros([100, 100]))
+             dr = ImageDraw.Draw(im)
+
+             px, py = 0, 100  # guard against the first command being a pen-down
+             for [x, y, t] in all_commands:
+                 if t == 0:
+                     dr.line((
+                         px + width / 2,
+                         py - width / 2,  # letters are shifted down for some reason
+                         x + width / 2,
+                         y - width / 2), 255, 1)
+                 px, py = x, y
+
+             im.convert("RGB").save(f"results/{''.join(letters)}_frames/frames_{str(i * transition_time + j).zfill(3)}.png")
+
+     # Convert frames to video using ffmpeg
+     photos = ffmpeg.input(f"results/{''.join(letters)}_frames/frames_*.png", pattern_type='glob', framerate=24)
+     videos = photos.output(f"results/{''.join(letters)}_video.mov", vcodec="libx264", pix_fmt="yuv420p")
+     videos.run(overwrite_output=True)
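
The writer blend in get_writer_blend_W_c is a plain convex combination over the writer axis, followed by the character-matrix multiply; a toy shape-level sketch of the same arithmetic (random tensors; n, M, and the weights are arbitrary):

    # Shape-level sketch of get_writer_blend_W_c's arithmetic (not part of the commit).
    import torch

    n, M, L = 2, 3, 256                      # writers, characters, latent size
    all_Ws = torch.randn(n, M, L)            # per-writer style vectors
    all_Cs = torch.randn(n, M, L, L)         # character matrices (writer-independent in practice)
    weights = torch.tensor([0.7, 0.3]).reshape(n, 1, 1)

    W_blend = (weights * all_Ws).sum(dim=0).unsqueeze(-1)  # M x L x 1, convex combination
    W_c = torch.bmm(all_Cs[0], W_blend)                    # M x L x 1, apply character matrices
    assert W_c.reshape(M, 1, L).shape == (M, 1, L)
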
data/writers/120/0.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:488e337dc15361658a8bab492c9e889daad1acca812d9a11fb8e369219fab6ef
+ size 175537
data/writers/120/1.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a04bde9644e378ebb2dab04306af81858b967a33f96685c3645cd37615880ebb
+ size 134815
data/writers/120/10.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:20d95ba927366959e2f4b19bc0b932c3532930d3519a5003a357a46137785d39
+ size 134965
data/writers/120/100.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:351a2367d904833fbccec18a4a04d979e20493d7e5b0be5b46bdb0be5992dbf1
+ size 127588
data/writers/120/101.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c008de6c369b3b50cbf31f89b7ad7220164d8149ac4e9dd6e1b91017931a4d60
+ size 121980
data/writers/120/102.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d4e4f83e793e38199a813bea019288d3f04433844384b21806745a9e5c51cfad
+ size 107769
data/writers/120/103.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7a5626912691f52d426b2d2c37ad29194bb0bfc8cfeb5a42a9e16927bab7f79e
+ size 110661
data/writers/120/104.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ff65734b3bd0e695e6944ec5a79e40bb00c1766fdc5fe1dc39300c8be38bd15b
+ size 108546
data/writers/120/105.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c2b5c9a945f6efb3bef5fb0255a646aeafd41ea15ef75a2578f908392511897d
+ size 103809
data/writers/120/106.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:991b1db11a0514cff5134dc364790d3af3aaf22a9bdea170ee47b38801f4f684
+ size 108246
data/writers/120/107.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c863110c0a0d51d817afa6fbe55e18f4207e1b00a4960fc9f8d08cc351c38851
+ size 122094
data/writers/120/108.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:787de7d2534ec295ffd6df24b7cbed10dd84f433ce61e6aab0b9b9541404c0b0
+ size 134977
data/writers/120/109.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6ed4d6be1fc96084d741f95ff21d62f822c411ef60baf327e48e8e512fafdb75
+ size 112104
data/writers/120/11.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:550bc976e80d45d669c9154d82a066629f92f39be40403e5c554e5f2b87231c7
+ size 125640
data/writers/120/110.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2204bedcd2e09d457c4641c4039ecc0bcc1913d29a5617e590baf773f6b4667c
+ size 104715
data/writers/120/111.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3c9b6a13f901029afafd7b587f0785ed0325f96ba89e2656467741a342aee233
+ size 103260
data/writers/120/112.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8d65de4fc97401f4e65bb415d4357e42e6887e0e3ad03e62b73f7be428b8b80d
+ size 128293
data/writers/120/113.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fe308240dd20d0d21692a669d10ec2c0022c03fd5db85b5c4a7fd7430bc3a0fb
+ size 127927
data/writers/120/114.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3b677482a49f5de3cc6cc4e2e98bea3cb88bf9869ab16b16b235c35464984bd8
+ size 104076
data/writers/120/115.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a63b963d93d303bde992da6dce4ab97258eac80310172553ef035fd5f17114bc
+ size 91845
data/writers/120/116.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:48c9f17dbdd9fcd9abcdccda4b96c465f5e3423b63f556a70a060c2e49d68f5f
+ size 144559
data/writers/120/117.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fc36c22be7ba098958dbca1ee33d93b892d61162f2b15af232e819af8bb01dee
+ size 131440
data/writers/120/118.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:94d26ef8069e4171db86239a571ea9238b257e29e7f45eaf931bf348109d674b
+ size 113775
data/writers/120/119.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:844575ae6d80e7d99ee991f394efbf34e187fea4f07a37e65dc152360070af99
+ size 62214
data/writers/120/12.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:da73c582d167dd1e643b5ac961458559bdb7bf8385c0a276865eac93e1708efb
+ size 153187
data/writers/120/120.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d07ff24e6a822dfe0da15678ec6fb39d3d569b615c9465494ab2d71f9dcb5618
+ size 144793
data/writers/120/121.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c8614d0d5a91e2269af3ccfcf67e5fc3629dff45e66d7ce7b5f6c942ebc6c1dd
+ size 115830
data/writers/120/122.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9dbcfdf6c3842aba365f0dd4188966d60b1d2b3686cf027efc22d0736ddc29ac
+ size 143827
data/writers/120/123.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0df71728a9424a79b2570f8da50dd50470df861fdfb3f32165331b306ca09f57
+ size 92145
data/writers/120/124.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9c976cf8a9d94a08acf260ab9602025fc058293b00c52c693d49ea4219fd28a9
+ size 125640
data/writers/120/125.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bd843eb48e8563800945224760dedf90b9624c063182e09f8ad3767d448b7fb8
+ size 101817
data/writers/120/126.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7755adb199c440d99ffbcbfc248098d237de982c3f5f5a28a169fff430e94368
+ size 111777
data/writers/120/127.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c1dcc75ca7e80fa07f1edfa6538322f92f90ac2d11265a4676be6e045a9db3f3
+ size 111771
data/writers/120/128.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:43ebcd79b32266a6c9e307f590bee292f9b90aaa7552ef5b5b2dd9a857a58a43
+ size 133333
data/writers/120/129.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2c663a98541bdda78554f5c2d83f07488cbfebb9cf9108261e5730375cf1d452
+ size 126411
data/writers/120/13.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:808878ebe3eb40eef2d75f33db32cb4efd22cac468c15179ca69c92304c3e379
+ size 130951
data/writers/120/130.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fe5aacdddcb712e6e540cec76628ad15ed16906b6452b9e08f9c41449f1df92f
+ size 145885
data/writers/120/131.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5dd771d047b5a7787106286bbef893127f167e749d45a268e94607c699639079
+ size 106146
data/writers/120/132.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f9075a3167827689141f85211d37f0989c6f0309240820c1924d6dfd2f059dd0
+ size 134359