owiedotch committed
Commit 8ad4b15 · verified · 1 Parent(s): e173c02

Update app.py

Files changed (1)
1. app.py (+20 -4)
app.py CHANGED
@@ -24,15 +24,23 @@ def encode_audio(audio_path):
     tokens = tokens.cpu().numpy()
 
     # Save to a temporary file
-    with tempfile.NamedTemporaryFile(suffix='.oterin', delete=False) as tmp_file:
-        np.save(tmp_file.name, tokens)
-        token_path = tmp_file.name
+    temp_file = tempfile.NamedTemporaryFile(suffix='.oterin', delete=False)
+    temp_file.close()  # Close the file before writing to it
+    np.save(temp_file.name, tokens)
 
-    return token_path, f"Encoded to {len(tokens)} tokens"
+    # Ensure the file exists and has content
+    if os.path.exists(temp_file.name) and os.path.getsize(temp_file.name) > 0:
+        return temp_file.name, f"Encoded to {len(tokens)} tokens"
+    else:
+        raise Exception("Failed to create token file")
 
 @spaces.GPU(duration=60)
 def decode_tokens(token_path):
     """Decode tokens to audio"""
+    # Ensure the file exists and has content
+    if not os.path.exists(token_path) or os.path.getsize(token_path) == 0:
+        return None, "Error: Empty or missing token file"
+
     tokens = np.load(token_path)
     # Convert to torch tensor if needed by the model
     if hasattr(semanticodec, 'decode_requires_tensor') and semanticodec.decode_requires_tensor:
@@ -47,6 +55,10 @@ def decode_tokens(token_path):
     sf.write(output_buffer, waveform[0, 0], 32000, format='WAV')
     output_buffer.seek(0)
 
+    # Verify the buffer has content
+    if output_buffer.getbuffer().nbytes == 0:
+        return None, "Error: Failed to generate audio"
+
     return output_buffer, f"Decoded {len(tokens)} tokens to audio"
 
 @spaces.GPU(duration=80)
@@ -69,6 +81,10 @@ def process_both(audio_path):
     sf.write(output_buffer, waveform[0, 0], 32000, format='WAV')
     output_buffer.seek(0)
 
+    # Verify the buffer has content
+    if output_buffer.getbuffer().nbytes == 0:
+        return None, "Error: Failed to generate audio"
+
     return output_buffer, f"Encoded to {len(tokens)} tokens\nDecoded {len(tokens)} tokens to audio"
 
 # Create Gradio interface
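
As a rough standalone sketch (not part of this commit), the pattern the fix relies on can be exercised like this: write a NumPy array to a closed NamedTemporaryFile by name, then verify the file is non-empty before passing its path on. The token array below is hypothetical, and a '.npy' suffix is used because np.save appends one to filenames that lack it.

import os
import tempfile

import numpy as np

# Hypothetical token array standing in for the codec output.
tokens = np.arange(1024, dtype=np.int64)

# Create the file, close the handle, then let np.save reopen it by name.
tmp = tempfile.NamedTemporaryFile(suffix='.npy', delete=False)
tmp.close()
np.save(tmp.name, tokens)

# Verify the file exists and has content before handing the path to a decoder.
if os.path.exists(tmp.name) and os.path.getsize(tmp.name) > 0:
    assert np.array_equal(np.load(tmp.name), tokens)
else:
    raise Exception('Failed to create token file')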