asigalov61 committed • commit a3e0baa • parent c913064

Upload 2 files

Browse files:
- app.py: +165 -146
- midi_synthesizer.py: +2 -9
app.py CHANGED
```diff
@@ -1,5 +1,6 @@
 import argparse
 import glob
+import json
 import os.path
 
 import torch
@@ -16,12 +17,12 @@ import TMIDIX
 import matplotlib.pyplot as plt
 
 in_space = os.getenv("SYSTEM") == "spaces"
-
-#=================================================================================================
 
-@torch.no_grad()
-def GenerateMIDI(num_tok, idrums, iinstr, progress=gr.Progress()):
 
+# =================================================================================================
+
+@torch.no_grad()
+def GenerateMIDI(num_tok, idrums, iinstr):
     print('=' * 70)
     print('Req num tok', num_tok)
     print('Req instr', iinstr)
```
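The new signature drops the `progress=gr.Progress()` argument: instead of returning one final result, the rewritten `GenerateMIDI` becomes a generator that `yield`s partial results (see the next hunk), which Gradio streams to the bound output components while generation is still running. A minimal sketch of that generator pattern, with a hypothetical `slow_count` handler standing in for the real function:

```python
import gradio as gr

def slow_count(n):
    # Generator event handler: each yield pushes an intermediate update to
    # the bound output components instead of waiting for a final return.
    total = 0
    for i in range(int(n)):
        total += i
        yield f"step {i + 1}/{int(n)}", total

with gr.Blocks() as demo:
    n = gr.Slider(1, 100, value=10, label="steps")
    btn = gr.Button("run")
    status = gr.Textbox(label="status")
    running = gr.Number(label="running total")
    btn.click(slow_count, n, [status, running])

# Generator handlers need the queue enabled, which is why app.py ends with
# app.queue(concurrency_count=1).launch(...)
demo.queue().launch()
```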
```diff
@@ -33,139 +34,148 @@ def GenerateMIDI(num_tok, idrums, iinstr, progress=gr.Progress()):
     else:
         drums = 3073
 
-    instruments_list = ["Piano", "Guitar", "Bass", "Violin", "Cello", "Harp", "Trumpet", "Sax", "Flute", 'Drums', …
+    instruments_list = ["Piano", "Guitar", "Bass", "Violin", "Cello", "Harp", "Trumpet", "Sax", "Flute", 'Drums',
+                        "Choir", "Organ"]
     first_note_instrument_number = instruments_list.index(iinstr)
 
-    start_tokens = [3087, drums, 3075+first_note_instrument_number]
+    start_tokens = [3087, drums, 3075 + first_note_instrument_number]
 
     print('Selected Improv sequence:')
     print(start_tokens)
     print('=' * 70)
 
+    output_signature = 'Allegro Music Transformer'
+    output_file_name = 'Allegro-Music-Transformer-Music-Composition'
+    track_name = 'Project Los Angeles'
+    list_of_MIDI_patches = [0, 24, 32, 40, 42, 46, 56, 71, 73, 0, 53, 19, 0, 0, 0, 0]
+    number_of_ticks_per_quarter = 500
+    text_encoding = 'ISO-8859-1'
+
+    output_header = [number_of_ticks_per_quarter,
+                     [['track_name', 0, bytes(output_signature, text_encoding)]]]
+
+    patch_list = [['patch_change', 0, 0, list_of_MIDI_patches[0]],
+                  ['patch_change', 0, 1, list_of_MIDI_patches[1]],
+                  ['patch_change', 0, 2, list_of_MIDI_patches[2]],
+                  ['patch_change', 0, 3, list_of_MIDI_patches[3]],
+                  ['patch_change', 0, 4, list_of_MIDI_patches[4]],
+                  ['patch_change', 0, 5, list_of_MIDI_patches[5]],
+                  ['patch_change', 0, 6, list_of_MIDI_patches[6]],
+                  ['patch_change', 0, 7, list_of_MIDI_patches[7]],
+                  ['patch_change', 0, 8, list_of_MIDI_patches[8]],
+                  ['patch_change', 0, 9, list_of_MIDI_patches[9]],
+                  ['patch_change', 0, 10, list_of_MIDI_patches[10]],
+                  ['patch_change', 0, 11, list_of_MIDI_patches[11]],
+                  ['patch_change', 0, 12, list_of_MIDI_patches[12]],
+                  ['patch_change', 0, 13, list_of_MIDI_patches[13]],
+                  ['patch_change', 0, 14, list_of_MIDI_patches[14]],
+                  ['patch_change', 0, 15, list_of_MIDI_patches[15]],
+                  ['track_name', 0, bytes(track_name, text_encoding)]]
+
+    output = output_header + [patch_list]
+
+    yield output, None, None, [create_msg("visualizer_clear", None)]
+
     outy = start_tokens
+    time = 0
+    dur = 0
+    vel = 0
+    pitch = 0
+    channel = 0
 
-    for i in …
+    for i in range(num_tok):
         inp = torch.LongTensor([outy]).cpu()
 
         out = model.module.generate(inp,
-                                    …
+                                    1,
+                                    temperature=0.9,
+                                    return_prime=False,
+                                    verbose=False)
 
         out0 = out[0].tolist()
         outy.extend(out0)
+        ss1 = int(out0[0])
 
-    …
-
-    for ss in song:
-
-        ss1 = int(ss)
-
-        if ss1 > 0 and ss1 < 256:
-
-            time += ss1 * 8
-
-        if ss1 >= 256 and ss1 < 1280:
-
-            dur = ((ss1-256) // 8) * 32
-            vel = (((ss1-256) % 8)+1) * 15
-
-        if ss1 >= 1280 and ss1 < 2816:
-            channel = (ss1-1280) // 128
-            pitch = (ss1-1280) % 128
-
-            song_f.append(['note', int(time), int(dur), int(channel), int(pitch), int(vel) ])
-
-    output_signature = 'Allegro Music Transformer'
-    output_file_name = 'Allegro-Music-Transformer-Music-Composition'
-    track_name='Project Los Angeles'
-    list_of_MIDI_patches=[0, 24, 32, 40, 42, 46, 56, 71, 73, 0, 53, 19, 0, 0, 0, 0]
-    number_of_ticks_per_quarter=500
-    text_encoding='ISO-8859-1'
-
-    output_header = [number_of_ticks_per_quarter,
-                     [['track_name', 0, bytes(output_signature, text_encoding)]]]
-
-    patch_list = [['patch_change', 0, 0, list_of_MIDI_patches[0]],
-                  ['patch_change', 0, 1, list_of_MIDI_patches[1]],
-                  ['patch_change', 0, 2, list_of_MIDI_patches[2]],
-                  ['patch_change', 0, 3, list_of_MIDI_patches[3]],
-                  ['patch_change', 0, 4, list_of_MIDI_patches[4]],
-                  ['patch_change', 0, 5, list_of_MIDI_patches[5]],
-                  ['patch_change', 0, 6, list_of_MIDI_patches[6]],
-                  ['patch_change', 0, 7, list_of_MIDI_patches[7]],
-                  ['patch_change', 0, 8, list_of_MIDI_patches[8]],
-                  ['patch_change', 0, 9, list_of_MIDI_patches[9]],
-                  ['patch_change', 0, 10, list_of_MIDI_patches[10]],
-                  ['patch_change', 0, 11, list_of_MIDI_patches[11]],
-                  ['patch_change', 0, 12, list_of_MIDI_patches[12]],
-                  ['patch_change', 0, 13, list_of_MIDI_patches[13]],
-                  ['patch_change', 0, 14, list_of_MIDI_patches[14]],
-                  ['patch_change', 0, 15, list_of_MIDI_patches[15]],
-                  ['track_name', 0, bytes(track_name, text_encoding)]]
-
-    output = output_header + [patch_list + song_f]
+        if 0 < ss1 < 256:
+            time += ss1 * 8
+
+        if 256 <= ss1 < 1280:
+            dur = ((ss1 - 256) // 8) * 32
+            vel = (((ss1 - 256) % 8) + 1) * 15
+
+        if 1280 <= ss1 < 2816:
+            channel = (ss1 - 1280) // 128
+            pitch = (ss1 - 1280) % 128
+            event = ['note', int(time), int(dur), int(channel), int(pitch), int(vel)]
+            output[-1].append(event)
+
+        yield output, None, None, [create_msg("visualizer_append", event), create_msg("progress", [i + 1, num_tok])]
 
     midi_data = TMIDIX.score2midi(output, text_encoding)
 
     with open(f"Allegro-Music-Transformer-Music-Composition.mid", 'wb') as f:
         f.write(midi_data)
 
-    itrack = 1
-
-    opus = TMIDIX.score2opus(output)
-
-    while itrack < len(opus):
-        for event in opus[itrack]:
-            if (event[0] == 'note_on') or (event[0] == 'note_off'):
-                output1.append(event)
-        itrack += 1
-
-    audio = synthesis([500, output1], 'SGM-v2.01-YamahaGrand-Guit-Bass-v2.7.sf2')
-
-    x = []
-    y =[]
-    c = []
-
-    colors = ['red', 'yellow', 'green', 'cyan', 'blue', 'pink', 'orange', 'purple', 'gray', 'white', 'gold', 'silver']
-
-    for s in song_f:
-        x.append(s[1] / 1000)
-        y.append(s[4])
-        c.append(colors[s[3]])
-
-    plt.close()
-    plt.figure(figsize=(14,5))
-    ax=plt.axes(title='Allegro Music Transformer Composition')
-    ax.set_facecolor('black')
-
-    plt.scatter(x,y, c=c)
-    plt.xlabel("Time")
-    plt.ylabel("Pitch")
-
-    yield [500, output1], plt, "Allegro-Music-Transformer-Music-Composition.mid", (44100, audio)
-
-#=================================================================================================
+    audio = synthesis(TMIDIX.score2opus(output), 'SGM-v2.01-YamahaGrand-Guit-Bass-v2.7.sf2')
 
+    yield output, "Allegro-Music-Transformer-Music-Composition.mid", (44100, audio), [
+        create_msg("visualizer_end", None)]
+
+
+def cancel_run(mid_seq):
+    if mid_seq is None:
+        return None, None, None
+    text_encoding = 'ISO-8859-1'
+    midi_data = TMIDIX.score2midi(mid_seq, text_encoding)
+
+    with open(f"Allegro-Music-Transformer-Music-Composition.mid", 'wb') as f:
+        f.write(midi_data)
+
+    audio = synthesis(TMIDIX.score2opus(mid_seq), 'SGM-v2.01-YamahaGrand-Guit-Bass-v2.7.sf2')
+
+    yield "Allegro-Music-Transformer-Music-Composition.mid", (44100, audio), [
+        create_msg("visualizer_end", None)]
+
+
+# =================================================================================================
+
+def load_javascript(dir="javascript"):
+    scripts_list = glob.glob(f"{dir}/*.js")
+    javascript = ""
+    for path in scripts_list:
+        with open(path, "r", encoding="utf8") as jsfile:
+            javascript += f"\n<!-- {path} --><script>{jsfile.read()}</script>"
+    template_response_ori = gr.routes.templates.TemplateResponse
+
+    def template_response(*args, **kwargs):
+        res = template_response_ori(*args, **kwargs)
+        res.body = res.body.replace(
+            b'</head>', f'{javascript}</head>'.encode("utf8"))
+        res.init_headers()
+        return res
+
+    gr.routes.templates.TemplateResponse = template_response
+
+
+class JSMsgReceiver(gr.HTML):
+
+    def __init__(self, **kwargs):
+        super().__init__(elem_id="msg_receiver", visible=False, **kwargs)
+
+    def postprocess(self, y):
+        if y:
+            y = f"<p>{json.dumps(y)}</p>"
+        return super().postprocess(y)
+
+    def get_block_name(self) -> str:
+        return "html"
+
+
+def create_msg(name, data):
+    return {"name": name, "data": data}
+
+
+if __name__ == "__main__":
     parser = argparse.ArgumentParser()
     parser.add_argument("--share", action="store_true", default=False, help="share gradio app")
     parser.add_argument("--port", type=int, default=7860, help="gradio server port")
```
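The rewritten generation loop decodes each sampled token as it arrives rather than post-processing the finished sequence, so every note can be streamed to the visualizer immediately: token values 1 to 255 advance the running time, 256 to 1279 set duration and velocity, and 1280 to 2815 select channel and pitch, completing one note. A standalone sketch of that decoder; the range arithmetic is taken from the committed code, while the function itself is illustrative:

```python
# Sketch of the token decoding used in GenerateMIDI: each token mutates a
# running (time, dur, vel, channel, pitch) state, and a channel/pitch token
# completes one 'note' event in TMIDIX score format.
def decode_tokens(tokens):
    time = dur = vel = pitch = channel = 0
    notes = []
    for t in tokens:
        if 0 < t < 256:                  # delta-time token, 8 ticks per step
            time += t * 8
        elif 256 <= t < 1280:            # duration/velocity token
            dur = ((t - 256) // 8) * 32  # 8 velocity bins per duration step
            vel = (((t - 256) % 8) + 1) * 15
        elif 1280 <= t < 2816:           # channel/pitch token -> emit a note
            channel = (t - 1280) // 128  # 128 pitches per channel
            pitch = (t - 1280) % 128
            notes.append(['note', time, dur, channel, pitch, vel])
    return notes

print(decode_tokens([10, 300, 1340]))  # [['note', 80, 160, 0, 60, 75]]
```

One caveat visible in the committed loop: its per-token `yield` references `event`, which is only bound once the first channel/pitch token has been decoded, so the earliest yields can hit an unbound name; the sketch above sidesteps that by only collecting completed notes.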
```diff
@@ -174,28 +184,31 @@ if __name__ == "__main__":
     print('Loading model...')
 
     SEQ_LEN = 2048
 
     # instantiate the model
 
     model = TransformerWrapper(
-        num_tokens…
-        max_seq_len…
-        attn_layers…
+        num_tokens=3088,
+        max_seq_len=SEQ_LEN,
+        attn_layers=Decoder(dim=1024, depth=32, heads=8)
     )
 
     model = AutoregressiveWrapper(model)
 
     model = torch.nn.DataParallel(model)
 
     model.cpu()
     print('=' * 70)
 
     print('Loading model checkpoint...')
 
-    model.load_state_dict(…
+    model.load_state_dict(
+        torch.load('Allegro_Music_Transformer_Small_Trained_Model_56000_steps_0.9399_loss_0.7374_acc.pth',
+                   map_location='cpu'))
     print('=' * 70)
 
     model.eval()
 
     print('Done!')
+
+    load_javascript()
```
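Because the model is wrapped in `torch.nn.DataParallel` before the checkpoint is loaded, the saved state-dict keys carry a `module.` prefix, and inference goes through `model.module.generate(...)` as seen in `GenerateMIDI`. A sketch of the same stack in isolation; importing from the pip `x-transformers` package is an assumption here (the app's own import is not shown in this diff and may come from a bundled `x_transformer` module whose `generate()` also accepts the `return_prime` and `verbose` flags used above):

```python
import torch
from x_transformers import TransformerWrapper, Decoder, AutoregressiveWrapper

SEQ_LEN = 2048

model = TransformerWrapper(
    num_tokens=3088,          # token vocabulary incl. the 3073-3087 control range
    max_seq_len=SEQ_LEN,
    attn_layers=Decoder(dim=1024, depth=32, heads=8),
)
model = AutoregressiveWrapper(model)
model = torch.nn.DataParallel(model)  # state-dict keys gain a 'module.' prefix
model.cpu()

model.load_state_dict(
    torch.load('Allegro_Music_Transformer_Small_Trained_Model_56000_steps_0.9399_loss_0.7374_acc.pth',
               map_location='cpu'))
model.eval()

# One sampling step, as in the generation loop (improv seed: start, drums, piano):
prime = torch.LongTensor([[3087, 3073, 3075]])
next_tok = model.module.generate(prime, 1, temperature=0.9)
```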
```diff
@@ -202,23 +215,29 @@ if __name__ == "__main__":
     app = gr.Blocks()
     with app:
         gr.Markdown("<h1 style='text-align: center; margin-bottom: 1rem'>Allegro Music Transformer</h1>")
-        gr.Markdown(
-            …
+        gr.Markdown(
+            "![Visitors](https://api.visitorbadge.io/api/visitors?path=asigalov61.Allegro-Music-Transformer&style=flat)\n\n"
+            "Full-attention multi-instrumental music transformer featuring asymmetrical encoding with octo-velocity, and chords counters tokens, optimized for speed and performance\n\n"
+            "Check out [Allegro Music Transformer](https://github.com/asigalov61/Allegro-Music-Transformer) on GitHub!\n\n"
+            "[Open In Colab]"
+            "(https://colab.research.google.com/github/asigalov61/Allegro-Music-Transformer/blob/main/Allegro_Music_Transformer_Composer.ipynb)"
+            " for faster execution and endless generation"
+        )
+        js_msg = JSMsgReceiver()
+        input_drums = gr.Checkbox(label="Drums Controls", value=False, info="Drums present or not")
+        input_instrument = gr.Radio(
+            ["Piano", "Guitar", "Bass", "Violin", "Cello", "Harp", "Trumpet", "Sax", "Flute", "Choir", "Organ"],
+            value="Piano", label="Lead Instrument Controls", info="Desired lead instrument")
         input_num_tokens = gr.Slider(16, 512, value=256, label="Number of Tokens", info="Number of tokens to generate")
         run_btn = gr.Button("generate", variant="primary")
+        interrupt_btn = gr.Button("interrupt")
 
         output_midi_seq = gr.Variable()
+        output_midi_visualizer = gr.HTML(elem_id="midi_visualizer_container")
         output_audio = gr.Audio(label="output audio", format="mp3", elem_id="midi_audio")
-        output_plot = gr.Plot(label="output plot")
         output_midi = gr.File(label="output midi", file_types=[".mid"])
-        run_event = run_btn.click(GenerateMIDI, [input_num_tokens, input_drums, input_instrument],
-                                  …
+        run_event = run_btn.click(GenerateMIDI, [input_num_tokens, input_drums, input_instrument],
+                                  [output_midi_seq, output_midi, output_audio, js_msg])
+        interrupt_btn.click(cancel_run, output_midi_seq, [output_midi, output_audio, js_msg],
+                            cancels=run_event, queue=False)
+    app.queue(concurrency_count=1).launch(server_port=opt.port, share=opt.share, inbrowser=True)
```
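`run_btn.click` returns an event reference; handing it to the interrupt button's `cancels=` argument lets "interrupt" abort the streaming generator mid-run, while `queue(concurrency_count=1)` serializes jobs so only one generation runs at a time. A minimal sketch of that cancel wiring with toy handlers (not from the repo), using the Gradio 3.x API the diff targets:

```python
import time
import gradio as gr

def long_job():
    # Stand-in for GenerateMIDI: a streaming generator the user may abort.
    for i in range(100):
        time.sleep(0.1)
        yield f"working... {i + 1}%"

def on_cancel():
    return "interrupted"

with gr.Blocks() as demo:
    out = gr.Textbox(label="status")
    run_btn = gr.Button("generate", variant="primary")
    stop_btn = gr.Button("interrupt")
    run_event = run_btn.click(long_job, None, out)
    # cancels= aborts the queued/streaming event; queue=False lets the cancel
    # fire immediately instead of waiting behind the running job.
    stop_btn.click(on_cancel, None, out, cancels=[run_event], queue=False)

demo.queue(concurrency_count=1).launch()
```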
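The `js_msg` output channel is how Python drives the browser-side piano roll: each update is a small `{"name", "data"}` dict, a batch of which `JSMsgReceiver` serializes as JSON into a hidden HTML element, where the scripts injected by `load_javascript()` pick them up. A sketch of the Python half of that protocol; the message names are the ones used in the diff, and the JavaScript consumer is not shown:

```python
import json

def create_msg(name, data):
    # Message envelope consumed by the injected page JavaScript.
    return {"name": name, "data": data}

# One streaming update: append a decoded note and advance the progress bar.
msgs = [create_msg("visualizer_append", ['note', 80, 160, 0, 60, 75]),
        create_msg("progress", [1, 256])]

# What JSMsgReceiver.postprocess renders into the hidden HTML component:
payload = f"<p>{json.dumps(msgs)}</p>"
print(payload)
```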
midi_synthesizer.py CHANGED
```diff
@@ -19,12 +19,8 @@ def synthesis(midi_opus, soundfont_path, sample_rate=44100):
     fl = fluidsynth.Synth(samplerate=float(sample_rate))
     sfid = fl.sfload(soundfont_path)
     last_t = 0
-    list_of_MIDI_patches=[0, 24, 32, 40, 42, 46, 56, 71, 73, 0, 53, 19, 0, 0, 0, 0]
     for c in range(16):
-        if c == 9:
-            fl.program_select(c, sfid, 128, 0)
-        else:
-            fl.program_select(c, sfid, 0, list_of_MIDI_patches[c])
+        fl.program_select(c, sfid, 128 if c == 9 else 0, 0)
     for event in event_list:
         name = event[0]
         sample_len = int(((event[1] / ticks_per_beat) * tempo / (10 ** 6)) * sample_rate)
@@ -37,10 +33,7 @@ def synthesis(midi_opus, soundfont_path, sample_rate=44100):
             tempo = event[2]
         elif name == "patch_change":
             c, p = event[2:4]
-            if c == 9:
-                fl.program_select(c, sfid, 128, 0)
-            else:
-                fl.program_select(c, sfid, 0, p)
+            fl.program_select(c, sfid, 128 if c == 9 else 0, p)
         elif name == "control_change":
             c, cc, v = event[2:5]
             fl.cc(c, cc, v)
```
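Both call sites collapse the drum special case into one expression: General MIDI reserves channel 9 for percussion, and SoundFonts keep drum kits in bank 128, so `128 if c == 9 else 0` picks the kit on the drum channel and a melodic bank elsewhere. Initialization also leaves every channel on program 0 instead of the removed hard-coded patch table, since app.py now emits explicit `patch_change` events. A sketch of the channel setup with pyfluidsynth (`'soundfont.sf2'` is a placeholder path):

```python
import fluidsynth

# Initialize all 16 MIDI channels: channel 9 gets the percussion bank (128),
# everything else starts on bank 0, program 0; later patch_change events
# then select the actual instrument per channel.
fl = fluidsynth.Synth(samplerate=44100.0)
sfid = fl.sfload('soundfont.sf2')  # placeholder path
for c in range(16):
    fl.program_select(c, sfid, 128 if c == 9 else 0, 0)
```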