fix yield delta time
Files changed:
- app.py (+2 -2)
- app_onnx.py (+2 -2)
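
Both files receive the same two-line change: the throttle timestamp is now initialized one second in the future (t = time.time() + 1), and progress/visualizer messages are only yielded once more than 0.5 s has elapsed. Assuming t is not reset before that check, the first update is pushed back by roughly 1.5 s and later updates are rate-limited; a minimal sketch of the pattern follows the two diffs below.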
app.py CHANGED

@@ -232,14 +232,14 @@ def run(model_name, tab, mid_seq, continuation_state, continuation_select, instr
                               disable_control_change=not allow_cc, disable_channels=disable_channels,
                               generator=generator)
     events = [list() for i in range(OUTPUT_BATCH_SIZE)]
-    t = time.time()
+    t = time.time() + 1
     for i, token_seqs in enumerate(midi_generator):
         token_seqs = token_seqs.tolist()
         for j in range(OUTPUT_BATCH_SIZE):
             token_seq = token_seqs[j]
             mid_seq[j].append(token_seq)
             events[j].append(tokenizer.tokens2event(token_seq))
-        if time.time() - t > 0.
+        if time.time() - t > 0.5:
             msgs = [create_msg("progress", [i + 1, gen_events])]
             for j in range(OUTPUT_BATCH_SIZE):
                 msgs += [create_msg("visualizer_append", [j, events[j]])]
app_onnx.py CHANGED

@@ -261,14 +261,14 @@ def run(model_name, tab, mid_seq, continuation_state, continuation_select, instr
                               disable_control_change=not allow_cc, disable_channels=disable_channels,
                               generator=generator)
     events = [list() for i in range(OUTPUT_BATCH_SIZE)]
-    t = time.time()
+    t = time.time() + 1
     for i, token_seqs in enumerate(midi_generator):
         token_seqs = token_seqs.tolist()
         for j in range(OUTPUT_BATCH_SIZE):
             token_seq = token_seqs[j]
             mid_seq[j].append(token_seq)
             events[j].append(tokenizer.tokens2event(token_seq))
-        if time.time() - t > 0.
+        if time.time() - t > 0.5:
             msgs = [create_msg("progress", [i + 1, gen_events])]
             for j in range(OUTPUT_BATCH_SIZE):
                 msgs += [create_msg("visualizer_append", [j, events[j]])]
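
As referenced above, here is a minimal, self-contained sketch of the time-throttled progress pattern. This is not the Space's code: slow_steps, throttled_progress, min_interval, and initial_delay are illustrative names, and resetting t after each yield is an assumption, since that part of run() is not visible in the hunks above.

import time

def slow_steps(n, step_time=0.1):
    # Stand-in for midi_generator: pretend each generation step takes step_time seconds.
    for i in range(n):
        time.sleep(step_time)
        yield i

def throttled_progress(steps, min_interval=0.5, initial_delay=1.0):
    # Yield progress messages at most once per min_interval seconds.
    # Starting t in the future (time.time() + initial_delay) mirrors the
    # "t = time.time() + 1" change: nothing is emitted until roughly
    # initial_delay + min_interval seconds have passed.
    t = time.time() + initial_delay
    for i, step in enumerate(steps):
        # ... a real loop would accumulate the step result here ...
        if time.time() - t > min_interval:
            t = time.time()  # assumed reset between yields; not shown in the diff
            yield ("progress", i + 1)

if __name__ == "__main__":
    for msg in throttled_progress(slow_steps(40)):
        print(msg)

Starting the clock in the future is presumably a cheap way to skip the earliest updates, when the events lists are still nearly empty, without adding a separate warm-up flag.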