terry-li-hm committed
Commit b8c0ef3 · 1 parent: 1561f9d
Files changed (1):
  1. app.py +21 -8
app.py CHANGED
@@ -6,6 +6,7 @@ import soundfile as sf
 import spaces
 import torch
 import torchaudio
+from gradio.themes.utils import colors
 from sv import process_audio
 
 
@@ -34,15 +35,21 @@ def model_inference(input_wav, language):
 
 
 def launch():
-    with gr.Blocks(theme=gr.themes.Soft()) as demo:
+    # Create a custom orange theme
+    orange_theme = gr.themes.Default().set(
+        primary_hue=colors.orange,
+        secondary_hue=colors.stone,
+    )
+
+    with gr.Blocks(theme=orange_theme) as demo:
         gr.Markdown("# Cantonese Call Transcriber")
         gr.Markdown("## Try an example:")
 
         # Define components
-        audio_inputs = gr.Audio(label="Input")
-        text_outputs = gr.Textbox(lines=10, label="Output")
+        audio_inputs = gr.Audio(label="Input", visible=False)
+        text_outputs = gr.Textbox(lines=10, label="Output", visible=False)
 
-        # Place the Examples component above the input and output
+        # Place the Examples component first
         gr.Examples(
             examples=[["example/scb.mp3"]],
             inputs=[audio_inputs],
@@ -51,15 +58,21 @@ def launch():
             examples_per_page=1,
         )
 
+        # Main interface
         with gr.Row():
             with gr.Column(scale=2):
-                # Audio input is already defined, just reference it here
-                audio_inputs
+                gr.Audio(label="Input")
                 fn_button = gr.Button("Process Audio", variant="primary")
 
             with gr.Column(scale=3):
-                # Text output is already defined, just reference it here
-                text_outputs
+                gr.Textbox(lines=10, label="Output")
+
+        # Set up event handler
+        fn_button.click(
+            fn=lambda x: model_inference(x, "yue"),
+            inputs=[audio_inputs],
+            outputs=[text_outputs],
+        )
 
     demo.launch()
 
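
For context, the pattern this commit settles on is: keep the real Audio/Textbox components hidden so gr.Examples can target them, render display-only duplicates in the visible layout, and route the button click through model_inference with the language fixed to "yue" (Cantonese). A minimal self-contained sketch of that wiring follows; the model_inference stub and the constructor-style theme call are illustrative assumptions, while the component names, the example clip path, and the language code come from the diff above.

import gradio as gr
from gradio.themes.utils import colors


def model_inference(input_wav, language):
    # Stub standing in for the repo's model_inference; returns a fake transcript.
    return f"(transcription of {input_wav!r} in {language})"


# One way to get the same orange/stone palette: pass the hues to the theme constructor.
orange_theme = gr.themes.Default(primary_hue=colors.orange, secondary_hue=colors.stone)

with gr.Blocks(theme=orange_theme) as demo:
    gr.Markdown("# Cantonese Call Transcriber")
    gr.Markdown("## Try an example:")

    # Hidden components: gr.Examples needs real components to populate,
    # and the click handler reads/writes these, not the visible duplicates.
    audio_inputs = gr.Audio(label="Input", visible=False)
    text_outputs = gr.Textbox(lines=10, label="Output", visible=False)

    # example/scb.mp3 is the sample clip shipped in the Space's repo.
    gr.Examples(examples=[["example/scb.mp3"]], inputs=[audio_inputs], examples_per_page=1)

    with gr.Row():
        with gr.Column(scale=2):
            gr.Audio(label="Input")  # display-only duplicate
            fn_button = gr.Button("Process Audio", variant="primary")
        with gr.Column(scale=3):
            gr.Textbox(lines=10, label="Output")  # display-only duplicate

    # Cantonese ("yue") is hard-coded, matching the commit.
    fn_button.click(
        fn=lambda x: model_inference(x, "yue"),
        inputs=[audio_inputs],
        outputs=[text_outputs],
    )

if __name__ == "__main__":
    demo.launch()

Note that only the hidden components are wired to the handler; the visible Audio and Textbox widgets are cosmetic, which is why the example row is the intended entry point for the demo.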