import os.path
import time as reqtime
import datetime
from pytz import timezone
import torch
import spaces
import gradio as gr
from x_transformer_1_23_2 import *
import random
import tqdm
from midi_to_colab_audio import midi_to_colab_audio
import TMIDIX
import matplotlib.pyplot as plt
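
# True when the app is running inside a Hugging Face Space
# (Spaces set the SYSTEM environment variable to "spaces")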
in_space = os.getenv("SYSTEM") == "spaces"
# =================================================================================================
@spaces.GPU
def GenerateMusic(input_title, input_num_tokens, input_prompt_type):
    print('=' * 70)
    print('Req start time: {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now(PDT)))
    start_time = reqtime.time()

    print('Loading model...')

    SEQ_LEN = 4096 # Model's sequence length
    PAD_IDX = 2571 # Model's pad index
    DEVICE = 'cuda'
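
    # Token vocabulary layout, as inferred from the encoding/decoding
    # logic in this file (not from a published spec):
    #   0..127     delta start-time, in 32 ms steps
    #   128..255   note duration, in 32 ms steps
    #   256..2431  channel/pitch pairs (17 channels x 128 pitches)
    #   2432..2439 velocity, in 8 bins
    #   2440..2567 ASCII characters for song titles
    #   2569/2570  sequence start / end-of-title markers
    #   2571       padding (PAD_IDX)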
    # Instantiate the model
    model = TransformerWrapper(
        num_tokens = PAD_IDX+1,
        max_seq_len = SEQ_LEN,
        attn_layers = Decoder(dim = 2048, depth = 8, heads = 16, attn_flash = True)
    )

    model = AutoregressiveWrapper(model, ignore_index = PAD_IDX)
    model.to(DEVICE)
    print('=' * 70)
    print('Loading model checkpoint...')

    model.load_state_dict(
        torch.load('Text_to_Music_Transformer_Medium_Trained_Model_33934_steps_0.6093_loss_0.813_acc.pth',
                   map_location=DEVICE))
    print('=' * 70)

    model.eval()

    # bfloat16 autocast is the safer choice on CPU; float16 on CUDA
    if DEVICE == 'cpu':
        dtype = torch.bfloat16
    else:
        dtype = torch.float16

    ctx = torch.amp.autocast(device_type=DEVICE, dtype=dtype)

    print('Done!')
    print('=' * 70)
    # Clamp the requested token count to the range the UI slider allows
    input_num_tokens = max(8, min(2048, input_num_tokens))

    print('-' * 70)
    print('Input title:', input_title)
    print('Req num toks:', input_num_tokens)
    print('Open-ended prompt:', input_prompt_type)
    print('-' * 70)
    #===============================================================================

    print('Setting up model patches and loading helper functions...')

    # @title Setup and load model channels MIDI patches
    model_channel_0_piano_family = "Acoustic Grand" # @param ["Acoustic Grand", "Bright Acoustic", "Electric Grand", "Honky-Tonk", "Electric Piano 1", "Electric Piano 2", "Harpsichord", "Clav"]
    model_channel_1_chromatic_percussion_family = "Music Box" # @param ["Celesta", "Glockenspiel", "Music Box", "Vibraphone", "Marimba", "Xylophone", "Tubular Bells", "Dulcimer"]
    model_channel_2_organ_family = "Church Organ" # @param ["Drawbar Organ", "Percussive Organ", "Rock Organ", "Church Organ", "Reed Organ", "Accordion", "Harmonica", "Tango Accordion"]
    model_channel_3_guitar_family = "Acoustic Guitar(nylon)" # @param ["Acoustic Guitar(nylon)", "Acoustic Guitar(steel)", "Electric Guitar(jazz)", "Electric Guitar(clean)", "Electric Guitar(muted)", "Overdriven Guitar", "Distortion Guitar", "Guitar Harmonics"]
    model_channel_4_bass_family = "Fretless Bass" # @param ["Acoustic Bass", "Electric Bass(finger)", "Electric Bass(pick)", "Fretless Bass", "Slap Bass 1", "Slap Bass 2", "Synth Bass 1", "Synth Bass 2"]
    model_channel_5_strings_family = "Violin" # @param ["Violin", "Viola", "Cello", "Contrabass", "Tremolo Strings", "Pizzicato Strings", "Orchestral Harp", "Timpani"]
    model_channel_6_ensemble_family = "Choir Aahs" # @param ["String Ensemble 1", "String Ensemble 2", "SynthStrings 1", "SynthStrings 2", "Choir Aahs", "Voice Oohs", "Synth Voice", "Orchestra Hit"]
    model_channel_7_brass_family = "Trumpet" # @param ["Trumpet", "Trombone", "Tuba", "Muted Trumpet", "French Horn", "Brass Section", "SynthBrass 1", "SynthBrass 2"]
    model_channel_8_reed_family = "Alto Sax" # @param ["Soprano Sax", "Alto Sax", "Tenor Sax", "Baritone Sax", "Oboe", "English Horn", "Bassoon", "Clarinet"]
    model_channel_9_pipe_family = "Flute" # @param ["Piccolo", "Flute", "Recorder", "Pan Flute", "Blown Bottle", "Skakuhachi", "Whistle", "Ocarina"]
    model_channel_10_synth_lead_family = "Lead 8 (bass+lead)" # @param ["Lead 1 (square)", "Lead 2 (sawtooth)", "Lead 3 (calliope)", "Lead 4 (chiff)", "Lead 5 (charang)", "Lead 6 (voice)", "Lead 7 (fifths)", "Lead 8 (bass+lead)"]
    model_channel_11_synth_pad_family = "Pad 2 (warm)" # @param ["Pad 1 (new age)", "Pad 2 (warm)", "Pad 3 (polysynth)", "Pad 4 (choir)", "Pad 5 (bowed)", "Pad 6 (metallic)", "Pad 7 (halo)", "Pad 8 (sweep)"]
    model_channel_12_synth_effects_family = "FX 3 (crystal)" # @param ["FX 1 (rain)", "FX 2 (soundtrack)", "FX 3 (crystal)", "FX 4 (atmosphere)", "FX 5 (brightness)", "FX 6 (goblins)", "FX 7 (echoes)", "FX 8 (sci-fi)"]
    model_channel_13_ethnic_family = "Banjo" # @param ["Sitar", "Banjo", "Shamisen", "Koto", "Kalimba", "Bagpipe", "Fiddle", "Shanai"]
    model_channel_14_percussive_family = "Melodic Tom" # @param ["Tinkle Bell", "Agogo", "Steel Drums", "Woodblock", "Taiko Drum", "Melodic Tom", "Synth Drum", "Reverse Cymbal"]
    model_channel_15_sound_effects_family = "Bird Tweet" # @param ["Guitar Fret Noise", "Breath Noise", "Seashore", "Bird Tweet", "Telephone Ring", "Helicopter", "Applause", "Gunshot"]
    model_channel_16_drums_family = "Drums" # @param ["Drums"]
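
    # Note: the model appears to use 17 channels while MIDI only has 16,
    # so the percussive family selected above gets no dedicated slot in
    # the patches list below, and model channel 16 (drums) is routed to
    # MIDI channel 9 (the General MIDI drums channel)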
    print('=' * 70)
    print('Loading helper functions...')

    def txt2tokens(txt):
        # Map ASCII characters to token IDs 2440..2567; anything outside
        # the ASCII range falls back to token 2440
        return [ord(char)+2440 if 0 < ord(char) < 128 else 0+2440 for char in txt.lower()]

    def tokens2txt(tokens):
        # Inverse mapping: keep only tokens that fall in the text range
        return [chr(tok-2440) for tok in tokens if 0+2440 < tok < 128+2440]
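
    # For example: txt2tokens('abc') -> [2537, 2538, 2539], and
    # ''.join(tokens2txt([2537, 2538, 2539])) -> 'abc'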
    print('=' * 70)
    print('Setting up patches...')
    print('=' * 70)

    instruments = [v[1] for v in TMIDIX.Number2patch.items()]

    # patches[i] is the General MIDI program for MIDI channel i;
    # index 9 holds the drums placeholder
    patches = [instruments.index(model_channel_0_piano_family),
               instruments.index(model_channel_1_chromatic_percussion_family),
               instruments.index(model_channel_2_organ_family),
               instruments.index(model_channel_3_guitar_family),
               instruments.index(model_channel_4_bass_family),
               instruments.index(model_channel_5_strings_family),
               instruments.index(model_channel_6_ensemble_family),
               instruments.index(model_channel_7_brass_family),
               instruments.index(model_channel_8_reed_family),
               9, # Drums patch
               instruments.index(model_channel_9_pipe_family),
               instruments.index(model_channel_10_synth_lead_family),
               instruments.index(model_channel_11_synth_pad_family),
               instruments.index(model_channel_12_synth_effects_family),
               instruments.index(model_channel_13_ethnic_family),
               instruments.index(model_channel_15_sound_effects_family)
              ]

    print('Done!')
    print('=' * 70)
    print('Generating...')

    #@title Standard Text-to-Music Generator

    #@markdown Prompt settings
    song_title_prompt = input_title
    open_ended_prompt = input_prompt_type

    #@markdown Generation settings
    number_of_tokens_to_generate = input_num_tokens
    number_of_batches_to_generate = 1 #@param {type:"slider", min:1, max:16, step:1}
    temperature = 0.9 # @param {type:"slider", min:0.1, max:1, step:0.05}

    print('=' * 70)
    print('Text-to-Music Model Generator')
    print('=' * 70)

    if song_title_prompt == '':
        outy = [2569]
    else:
        if open_ended_prompt:
            # Open-ended: leave the title unterminated so the model may
            # extend it before composing
            outy = [2569] + txt2tokens(song_title_prompt)
        else:
            # Closed: token 2570 ends the title, so the model starts
            # composing right away
            outy = [2569] + txt2tokens(song_title_prompt) + [2570]

    print('Selected prompt sequence:')
    print(outy[:12])
    print('=' * 70)
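
    # The prompt is replicated across the batch dimension; with the
    # default single batch, inp is a (1, len(outy)) LongTensor on the GPU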
    torch.cuda.empty_cache()

    inp = [outy] * number_of_batches_to_generate
    inp = torch.LongTensor(inp).cuda()

    with ctx:
        out = model.generate(inp,
                             number_of_tokens_to_generate,
                             temperature=temperature,
                             return_prime=True,
                             verbose=False)

    out0 = out.tolist()

    print('=' * 70)
    print('Done!')
    print('=' * 70)
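
    # With return_prime=True the output sequence includes the prompt
    # tokens, so decoding the text range below recovers both the given
    # title and any continuation the model wrote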
    #===============================================================================

    print('Rendering results...')
    print('=' * 70)

    out1 = out0[0]

    print('Sample INTs', out1[:12])
    print('=' * 70)

    generated_song_title = ''.join(tokens2txt(out1)).title()

    print('Generated song title:', generated_song_title)
    print('=' * 70)
    if len(out1) != 0:

        song = out1
        song_f = []

        time = 0
        dur = 0
        vel = 90
        pitch = 0
        channel = 0
        chan = 0

        for ss in song:

            if 0 <= ss < 128:
                time += ss * 32

            if 128 <= ss < 256:
                dur = (ss-128) * 32

            if 256 <= ss < 2432:
                chan = (ss-256) // 128

                # Fold the model's 17 channels into MIDI's 16: 0-8 pass
                # through, 9-14 shift up past the MIDI drums channel,
                # and model channel 16 (drums) maps to MIDI channel 9
                if chan < 9:
                    channel = chan
                elif 9 <= chan < 15:
                    channel = chan+1
                elif chan == 15:
                    channel = 15
                elif chan == 16:
                    channel = 9

                pitch = (ss-256) % 128

            if 2432 <= ss < 2440:
                vel = (((ss-2432)+1) * 15)-1

                song_f.append(['note', time, dur, channel, pitch, vel, chan*8])
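
                # A velocity token completes a note event; the last field
                # (chan*8) is the General MIDI family base program,
                # presumably used by the score plot below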

    fn1 = "Text-to-Music-Transformer-Composition"

    detailed_stats = TMIDIX.Tegridy_ms_SONG_to_MIDI_Converter(song_f,
                                                              output_signature = 'Text-to-Music Transformer',
                                                              output_file_name = fn1,
                                                              track_name='Project Los Angeles',
                                                              list_of_MIDI_patches=patches
                                                              )

    new_fn = fn1+'.mid'

    audio = midi_to_colab_audio(new_fn,
                                soundfont_path=soundfont,
                                sample_rate=16000,
                                volume_scale=10,
                                output_for_gradio=True
                                )

    print('Done!')
    print('=' * 70)
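
    # The audio is rendered at 16 kHz through the SoundFont file set in
    # __main__; Gradio's Audio component takes a (sample_rate, data) tuple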
    #========================================================

    output_midi_title = generated_song_title
    output_midi_summary = str(song_f[:3])
    output_midi = str(new_fn)
    output_audio = (16000, audio)

    output_plot = TMIDIX.plot_ms_SONG(song_f, plot_title=output_midi, return_plt=True)

    print('Output MIDI file name:', output_midi)
    print('Output MIDI title:', output_midi_title)
    print('Output MIDI summary:', output_midi_summary)
    print('=' * 70)

    #========================================================

    print('-' * 70)
    print('Req end time: {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now(PDT)))
    print('-' * 70)
    print('Req execution time:', (reqtime.time() - start_time), 'sec')

    return output_midi_title, output_midi_summary, output_midi, output_audio, output_plot
# =================================================================================================
if __name__ == "__main__":

    PDT = timezone('US/Pacific')

    print('=' * 70)
    print('App start time: {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now(PDT)))
    print('=' * 70)
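
    # General MIDI SoundFont used for audio rendering; the .sf2 file is
    # assumed to sit next to app.py in the Space repo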
    soundfont = "SGM-v2.01-YamahaGrand-Guit-Bass-v2.7.sf2"

    app = gr.Blocks()

    with app:
        gr.Markdown("<h1 style='text-align: center; margin-bottom: 1rem'>Descriptive Music Transformer</h1>")
        gr.Markdown("<h1 style='text-align: center; margin-bottom: 1rem'>A music transformer that describes the music it generates</h1>")
        gr.Markdown(
            "![Visitors](https://api.visitorbadge.io/api/visitors?path=asigalov61.Descriptive-Music-Transformer&style=flat)\n\n"
            "Generate music from any title you can imagine :)\n\n"
            "Check out the [Annotated MIDI Dataset](https://huggingface.co/datasets/asigalov61/Annotated-MIDI-Dataset) on Hugging Face!\n\n"
            "[Open In Colab]"
            "(https://colab.research.google.com/github/asigalov61/Text-to-Music-Transformer/blob/main/Text_to_Music_Transformer.ipynb)"
            " for faster execution and endless generation"
        )

        gr.Markdown("## Enter any desired song title")

        input_title = gr.Textbox(value="Nothing Else Matters", label="Song title")
        input_prompt_type = gr.Checkbox(label="Open-ended prompt")
        input_num_tokens = gr.Slider(8, 2048, value=512, step=8, label="Number of tokens to generate")

        run_btn = gr.Button("generate", variant="primary")

        gr.Markdown("## Generation results")

        output_midi_title = gr.Textbox(label="Generated MIDI title")
        output_midi_summary = gr.Textbox(label="Output MIDI summary")
        output_audio = gr.Audio(label="Output MIDI audio", format="wav", elem_id="midi_audio")
        output_plot = gr.Plot(label="Output MIDI score plot")
        output_midi = gr.File(label="Output MIDI file", file_types=[".mid"])

        run_event = run_btn.click(GenerateMusic, [input_title, input_num_tokens, input_prompt_type],
                                  [output_midi_title, output_midi_summary, output_midi, output_audio, output_plot])

    app.queue().launch()