asigalov61 committed on
Commit
28d2673
1 Parent(s): a1cfa89

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +107 -75
app.py CHANGED
@@ -208,50 +208,65 @@ def InpaintPitches(input_midi, input_num_of_notes, input_patch_number):
208
  temperature=0.9
209
  num_memory_tokens=4096
210
 
211
- output = []
212
 
213
- idx = 0
214
 
215
- for c in chords[:input_num_tokens]:
216
-
217
- output.append(c)
218
 
219
- if input_conditioning_type == 'Chords-Times' or input_conditioning_type == 'Chords-Times-Durations':
220
- output.append(times[idx])
221
 
222
- if input_conditioning_type == 'Chords-Times-Durations':
223
- output.append(durs[idx])
224
-
225
- x = torch.tensor([output] * 1, dtype=torch.long, device=DEVICE)
226
-
227
- o = 0
228
-
229
- ncount = 0
230
-
231
- while o < 384 and ncount < max_chords_limit:
232
- with ctx:
233
- out = model.generate(x[-num_memory_tokens:],
234
- 1,
235
- temperature=temperature,
236
- return_prime=False,
237
- verbose=False)
238
-
239
- o = out.tolist()[0][0]
240
-
241
- if 256 <= o < 384:
242
- ncount += 1
243
-
244
- if o < 384:
245
- x = torch.cat((x, out), 1)
246
-
247
- outy = x.tolist()[0][len(output):]
248
-
249
- output.extend(outy)
250
 
251
- idx += 1
252
-
253
- if idx == len(chords[:input_num_tokens])-1:
254
- break
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
255
 
256
  print('=' * 70)
257
  print('Done!')
@@ -261,14 +276,12 @@ def InpaintPitches(input_midi, input_num_of_notes, input_patch_number):
261
  print('Rendering results...')
262
 
263
  print('=' * 70)
264
- print('Sample INTs', output[:12])
265
  print('=' * 70)
266
 
267
- out1 = output
268
-
269
- if len(out1) != 0:
270
 
271
- song = out1
272
  song_f = []
273
 
274
  time = 0
@@ -276,33 +289,52 @@ def InpaintPitches(input_midi, input_num_of_notes, input_patch_number):
276
  vel = 90
277
  pitch = 0
278
  channel = 0
279
-
280
- patches = [0] * 16
281
-
282
- channel = 0
283
-
 
284
  for ss in song:
285
-
286
- if 0 <= ss < 128:
287
-
288
- time += ss * 32
289
-
290
- if 128 <= ss < 256:
291
-
292
- dur = (ss-128) * 32
293
-
294
- if 256 <= ss < 384:
295
-
296
- pitch = (ss-256)
297
-
298
- vel = max(40, pitch)
299
-
300
- song_f.append(['note', time, dur, channel, pitch, vel, 0])
301
-
302
- fn1 = "Chords-Progressions-Transformer-Composition"
303
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
304
  detailed_stats = TMIDIX.Tegridy_ms_SONG_to_MIDI_Converter(song_f,
305
- output_signature = 'Chords Progressions Transformer',
306
  output_file_name = fn1,
307
  track_name='Project Los Angeles',
308
  list_of_MIDI_patches=patches
@@ -359,15 +391,15 @@ if __name__ == "__main__":
359
 
360
  app = gr.Blocks()
361
  with app:
362
- gr.Markdown("<h1 style='text-align: center; margin-bottom: 1rem'>Chords Progressions Transformer</h1>")
363
- gr.Markdown("<h1 style='text-align: center; margin-bottom: 1rem'>Chords-conditioned music transformer</h1>")
364
  gr.Markdown(
365
- "![Visitors](https://api.visitorbadge.io/api/visitors?path=asigalov61.Chords-Progressions-Transformer&style=flat)\n\n"
366
- "Generate music based on chords progressions\n\n"
367
- "Check out [Chords Progressions Transformer](https://github.com/asigalov61/Chords-Progressions-Transformer) on GitHub!\n\n"
368
  "[Open In Colab]"
369
- "(https://colab.research.google.com/github/asigalov61/Chords-Progressions-Transformer/blob/main/Chords_Progressions_Transformer.ipynb)"
370
- " for faster execution and endless generation"
371
  )
372
  gr.Markdown("## Upload your MIDI or select a sample example MIDI")
373
 
 
208
  temperature=0.9
209
  num_memory_tokens=4096
210
 
211
+ #@title Pitches/Instruments Inpainting
212
 
213
+ #@markdown You can stop the inpainting at any time to render partial results
214
 
215
+ #@markdown Inpainting settings
 
 
216
 
217
+ #@markdown Select MIDI patch present in the composition to inpaint
 
218
 
219
+ inpaint_MIDI_patch = input_patch_number
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
220
 
221
+ #@markdown Generation settings
222
+
223
+ number_of_prime_tokens = 90 # @param {type:"slider", min:3, max:8190, step:3}
224
+ number_of_memory_tokens = 2048 # @param {type:"slider", min:3, max:8190, step:3}
225
+ number_of_samples_per_inpainted_note = 1 #@param {type:"slider", min:1, max:16, step:1}
226
+
227
+ print('=' * 70)
228
+ print('Giant Music Transformer Inpainting Model Generator')
229
+ print('=' * 70)
230
+
231
+ out2 = []
232
+
233
+ for m in melody_chords[:number_of_prime_tokens]:
234
+ out2.append(m)
235
+
236
+ for i in range(number_of_prime_tokens, len(melody_chords)):
237
+
238
+ cpatch = (melody_chords[i]-2304) // 129
239
+
240
+ if 2304 <= melody_chords[i] < 18945 and (cpatch) == inpaint_MIDI_patch:
241
+
242
+ samples = []
243
+
244
+ for j in range(number_of_samples_per_inpainted_note):
245
+
246
+ inp = torch.LongTensor(out2[-number_of_memory_tokens:]).cuda()
247
+
248
+ with ctx:
249
+ out1 = model.generate(inp,
250
+ 1,
251
+ temperature=temperature,
252
+ return_prime=True,
253
+ verbose=False)
254
+
255
+ with torch.no_grad():
256
+ test_loss, test_acc = model(out1)
257
+
258
+ samples.append([out1.tolist()[0][-1], test_acc.tolist()])
259
+
260
+ accs = [y[1] for y in samples]
261
+ max_acc = max(accs)
262
+ max_acc_sample = samples[accs.index(max_acc)][0]
263
+
264
+ cpitch = (max_acc_sample-2304) % 129
265
+
266
+ out2.extend([((cpatch * 129) + cpitch)+2304])
267
+
268
+ else:
269
+ out2.append(melody_chords[i])
270
 
271
  print('=' * 70)
272
  print('Done!')
 
276
  print('Rendering results...')
277
 
278
  print('=' * 70)
279
+ print('Sample INTs', out2[:12])
280
  print('=' * 70)
281
 
282
+ if len(out2) != 0:
 
 
283
 
284
+ song = out2
285
  song_f = []
286
 
287
  time = 0
 
289
  vel = 90
290
  pitch = 0
291
  channel = 0
292
+
293
+ patches = [-1] * 16
294
+
295
+ channels = [0] * 16
296
+ channels[9] = 1
297
+
298
  for ss in song:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
299
 
300
+ if 0 <= ss < 256:
301
+
302
+ time += ss * 16
303
+
304
+ if 256 <= ss < 2304:
305
+
306
+ dur = ((ss-256) // 8) * 16
307
+ vel = (((ss-256) % 8)+1) * 15
308
+
309
+ if 2304 <= ss < 18945:
310
+
311
+ patch = (ss-2304) // 129
312
+
313
+ if patch < 128:
314
+
315
+ if patch not in patches:
316
+ if 0 in channels:
317
+ cha = channels.index(0)
318
+ channels[cha] = 1
319
+ else:
320
+ cha = 15
321
+
322
+ patches[cha] = patch
323
+ channel = patches.index(patch)
324
+ else:
325
+ channel = patches.index(patch)
326
+
327
+ if patch == 128:
328
+ channel = 9
329
+
330
+ pitch = (ss-2304) % 129
331
+
332
+ song_f.append(['note', time, dur, channel, pitch, vel, patch ])
333
+
334
+ patches = [0 if x==-1 else x for x in patches]
335
+
336
  detailed_stats = TMIDIX.Tegridy_ms_SONG_to_MIDI_Converter(song_f,
337
+ output_signature = 'Giant Music Transformer',
338
  output_file_name = fn1,
339
  track_name='Project Los Angeles',
340
  list_of_MIDI_patches=patches
 
391
 
392
  app = gr.Blocks()
393
  with app:
394
+ gr.Markdown("<h1 style='text-align: center; margin-bottom: 1rem'>Inpaint Music Transformer</h1>")
395
+ gr.Markdown("<h1 style='text-align: center; margin-bottom: 1rem'>Inpaint pitches in any MIDI</h1>")
396
  gr.Markdown(
397
+ "![Visitors](https://api.visitorbadge.io/api/visitors?path=asigalov61.Inpaint-Music-Transformer&style=flat)\n\n"
398
+ "This is a demo of the Giant Music Transformer pitches inpainting feature\n\n"
399
+ "Check out [Giant Music Transformer](https://github.com/asigalov61/Giant-Music-Transformer) on GitHub!\n\n"
400
  "[Open In Colab]"
401
+ "(https://colab.research.google.com/github/asigalov61/Giant-Music-Transformer/blob/main/Giant_Music_Transformer.ipynb)"
402
+ " for all features, faster execution and endless generation"
403
  )
404
  gr.Markdown("## Upload your MIDI or select a sample example MIDI")
405