Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -1,171 +1,14 @@
-import torch
-import time
-import moviepy.editor as mp
-import psutil
 import gradio as gr
-import spaces
-from transformers import pipeline
-from transformers.pipelines.audio_utils import ffmpeg_read
-
-DEFAULT_MODEL_NAME = "distil-whisper/distil-large-v3"
-BATCH_SIZE = 8
-
-device = 0 if torch.cuda.is_available() else "cpu"
-if device == "cpu":
-    DEFAULT_MODEL_NAME = "openai/whisper-tiny"
-
-def load_pipeline(model_name):
-    return pipeline(
-        task="automatic-speech-recognition",
-        model=model_name,
-        chunk_length_s=30,
-        device=device,
-    )
-
-pipe = load_pipeline(DEFAULT_MODEL_NAME)
-
-@spaces.GPU
-def transcribe(inputs, task, model_name):
-    if inputs is None:
-        raise gr.Error("No audio file submitted! Please upload or record an audio file before submitting your request.")
-
-    global pipe
-    if model_name != pipe.model.name_or_path:
-        pipe = load_pipeline(model_name)
-
-    start_time = time.time()  # Record the start time
-
-    # Load the audio file and calculate its duration
-    audio = mp.AudioFileClip(inputs)
-    audio_duration = audio.duration
-
-    text = pipe(inputs, batch_size=BATCH_SIZE, generate_kwargs={"task": task}, return_timestamps=True)["text"]
-    end_time = time.time()  # Record the end time
-
-    transcription_time = end_time - start_time  # Calculate the transcription time
-
-    # Create the transcription time output with additional information
-    transcription_time_output = (
-        f"Transcription Time: {transcription_time:.2f} seconds\n"
-        f"Audio Duration: {audio_duration:.2f} seconds\n"
-        f"Model Used: {model_name}\n"
-        f"Device Used: {'GPU' if torch.cuda.is_available() else 'CPU'}"
-    )
-
-    return text, transcription_time_output
-
-from gpustat import GPUStatCollection
-
-def update_gpu_status():
-    if torch.cuda.is_available() == False:
-        return "No Nvidia Device"
-    try:
-        gpu_stats = GPUStatCollection.new_query()
-        for gpu in gpu_stats:
-            # Assuming you want to monitor the first GPU, index 0
-            gpu_id = gpu.index
-            gpu_name = gpu.name
-            gpu_utilization = gpu.utilization
-            memory_used = gpu.memory_used
-            memory_total = gpu.memory_total
-            memory_utilization = (memory_used / memory_total) * 100
-            gpu_status = (f"GPU {gpu_id}: {gpu_name}, Utilization: {gpu_utilization}%, Memory Used: {memory_used}MB, Memory Total: {memory_total}MB, Memory Utilization: {memory_utilization:.2f}%")
-            return gpu_status
-
-    except Exception as e:
-        print(f"Error getting GPU stats: {e}")
-        return torch_update_gpu_status()
-
-def torch_update_gpu_status():
-    if torch.cuda.is_available():
-        gpu_info = torch.cuda.get_device_name(0)
-        gpu_memory = torch.cuda.mem_get_info(0)
-        total_memory = gpu_memory[1] / (1024 * 1024)
-        used_memory = (gpu_memory[1] - gpu_memory[0]) / (1024 * 1024)
-
-        gpu_status = f"GPU: {gpu_info}\nTotal Memory: {total_memory:.2f} MB\nUsed Memory: {used_memory:.2f} MB"
-    else:
-        gpu_status = "No GPU available"
-    return gpu_status
-
-def update_cpu_status():
-    import datetime
-    # Get the current time
-    current_time = datetime.datetime.now().time()
-    # Convert the time to a string
-    time_str = current_time.strftime("%H:%M:%S")
-
-    cpu_percent = psutil.cpu_percent()
-    cpu_status = f"CPU Usage: {cpu_percent}% {time_str}"
-    return cpu_status
-
-def update_status():
-    gpu_status = update_gpu_status()
-    cpu_status = update_cpu_status()
-    return gpu_status, cpu_status
-
-def refresh_status():
-    return update_status()
 
-demo = gr.Blocks()
 
-mf_transcribe = gr.Interface(
-    fn=transcribe,
-    inputs=[
-        gr.Audio(type="filepath"),
-        gr.Radio(["transcribe", "translate"], label="Task", value="transcribe"),
-        gr.Textbox(
-            label="Model Name",
-            value=DEFAULT_MODEL_NAME,
-            placeholder="Enter the model name",
-            info="Some available models: distil-whisper/distil-large-v3 distil-whisper/distil-medium.en Systran/faster-distil-whisper-large-v3 Systran/faster-whisper-large-v3 Systran/faster-whisper-medium openai/whisper-tiny, openai/whisper-base, openai/whisper-medium, openai/whisper-large-v3",
-        ),
-    ],
-    outputs=[gr.TextArea(label="Transcription"), gr.TextArea(label="Transcription Info")],
-    theme="huggingface",
-    title="Whisper Transcription",
-    description=(
-        "Transcribe long-form microphone or audio inputs with the click of a button! Demo uses the specified OpenAI Whisper"
-        " checkpoint and 🤗 Transformers to transcribe audio files of arbitrary length."
-    ),
-    allow_flagging="never",
-)
 
-file_transcribe = gr.Interface(
-    fn=transcribe,
-    inputs=[
-        gr.Audio(type="filepath", label="Audio file"),
-        gr.Radio(["transcribe", "translate"], label="Task", value="transcribe"),
-        gr.Textbox(
-            label="Model Name",
-            value=DEFAULT_MODEL_NAME,
-            placeholder="Enter the model name",
-            info="Some available models: openai/whisper-tiny, openai/whisper-base, openai/whisper-medium, openai/whisper-large-v2",
-        ),
-    ],
-    outputs=[gr.TextArea(label="Transcription"), gr.TextArea(label="Transcription Info")],
-    theme="huggingface",
-    title="Whisper Transcription",
-    description=(
-        "Transcribe long-form microphone or audio inputs with the click of a button! Demo uses the specified OpenAI Whisper"
-        " checkpoint and 🤗 Transformers to transcribe audio files of arbitrary length."
-    ),
-    allow_flagging="never",
-)
-with demo:
-    gr.TabbedInterface([mf_transcribe, file_transcribe], ["Microphone", "Audio file"])
-
-    with gr.Row():
-        refresh_button = gr.Button("Refresh Status")  # Create a refresh button
-
-    gpu_status_output = gr.Textbox(label="GPU Status", interactive=False)
-    cpu_status_output = gr.Textbox(label="CPU Status", interactive=False)
-
-    # Link the refresh button to the refresh_status function
-    refresh_button.click(refresh_status, None, [gpu_status_output, cpu_status_output])
 
-
-
 
-
-demo.launch()
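The removed app requested hardware through the `spaces` package's `@spaces.GPU` decorator, which is how a Space "Running on Zero" (ZeroGPU) attaches a GPU only for the duration of a decorated call. Below is a minimal sketch of that pattern, separate from this commit; the function name and the text-in/text-out wiring are illustrative, not taken from the diff.

```python
# Sketch of the ZeroGPU pattern used by the removed code; illustrative only.
# On a ZeroGPU Space, a GPU is attached only while a @spaces.GPU function runs.
import gradio as gr
import spaces
import torch

@spaces.GPU
def which_device(_prompt: str) -> str:
    # CUDA is visible here because the call runs with a GPU allocated.
    return torch.cuda.get_device_name(0) if torch.cuda.is_available() else "CPU only"

demo = gr.Interface(which_device, inputs="text", outputs="text")

if __name__ == "__main__":
    demo.launch()
```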
 import gradio as gr
+import numpy as np
+import time
 
 
+graudio=gr.Audio(type="filepath",show_download_button=True)
 
 
+demo = gr.Interface(fake_diffusion,
+                    inputs=[graudio],
+                    outputs="image")
 
+if __name__ == "__main__":
+    demo.launch()
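As committed, the new app.py hands `fake_diffusion` to `gr.Interface`, but no definition of that function appears anywhere in the 14-line file shown above, so the Space would raise a `NameError` at startup. A minimal sketch of a definition that would satisfy the wiring is shown below, assuming the intent is Gradio's generator-style "fake diffusion" progress demo adapted to the audio input; the step count and image size are illustrative, not from the commit.

```python
# Hypothetical stand-in for the undefined fake_diffusion referenced above.
# Assumes a generator that streams intermediate frames to the "image" output.
import time

import numpy as np

def fake_diffusion(audio_path):
    # The audio filepath provided by graudio is accepted but unused in this sketch.
    rng = np.random.default_rng()
    for _ in range(4):
        time.sleep(1)                    # simulate one "denoising" step
        yield rng.random((256, 256, 3))  # random RGB frame in [0, 1]
```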