j-tobias committed
Commit b5983bd • 1 Parent(s): afe419d

added wave and spectrogram

Files changed (2)
  1. README.md +1 -1
  2. app.py +69 -16
README.md CHANGED
@@ -1,6 +1,6 @@
 ---
 title: Heartbeat
-emoji: 🚀
+emoji: 💜
 colorFrom: purple
 colorTo: blue
 sdk: gradio
app.py CHANGED
@@ -1,55 +1,107 @@
+from plotly.subplots import make_subplots
+import plotly.graph_objects as go
 import gradio as gr
 import numpy as np
 import librosa
 
 
+def getBeats(audiodata:np.ndarray, sr:int):
+    # Compute onset envelope
+    onset_env = librosa.onset.onset_strength(y=audiodata, sr=sr)
+
+    # Detect beats
+    tempo, beats = librosa.beat.beat_track(onset_envelope=onset_env, sr=sr)
+
+    # Convert beat frames to time
+    beattimes = librosa.frames_to_time(beats, sr=sr)
+
+    return tempo[0], beattimes
+
+def plotCombined(audiodata, sr):
+    # Create subplots
+    fig = make_subplots(rows=2, cols=1, shared_xaxes=True, vertical_spacing=0.1,
+                        subplot_titles=('Audio Waveform', 'Spectrogram'))
+
+    # Waveform plot
+    time = (np.arange(0, len(audiodata)) / sr)*2
+    fig.add_trace(
+        go.Scatter(x=time, y=audiodata, mode='lines', name='Waveform', line=dict(color='blue', width=1)),
+        row=1, col=1
+    )
+
+    # Spectrogram plot
+    D = librosa.stft(audiodata)
+    S_db = librosa.amplitude_to_db(np.abs(D), ref=np.max)
+    times = librosa.times_like(S_db)
+    freqs = librosa.fft_frequencies(sr=sr)
+
+    fig.add_trace(
+        go.Heatmap(z=S_db, x=times, y=freqs, colorscale='Viridis',
+                   zmin=S_db.min(), zmax=S_db.max(), colorbar=dict(title='Magnitude (dB)')),
+        row=2, col=1
+    )
+
+    # Update layout
+    fig.update_layout(
+        height=800, width=900,
+        title_text="Audio Analysis",
+    )
+    fig.update_xaxes(title_text="Time (s)", row=2, col=1)
+    fig.update_yaxes(title_text="Amplitude", row=1, col=1)
+    fig.update_yaxes(title_text="Frequency (Hz)", type="log", row=2, col=1)
+
+    return fig
 
 def analyze(audio:gr.Audio):
     # Extract audio data and sample rate
-    sr, audio_data = audio
+    sr, audiodata = audio
 
-    # Ensure audio_data is a numpy array
-    if not isinstance(audio_data, np.ndarray):
-        audio_data = np.array(audio_data)
+    # Ensure audiodata is a numpy array
+    if not isinstance(audiodata, np.ndarray):
+        audiodata = np.array(audiodata)
 
     # Check if audio is mono or stereo
-    if len(audio_data.shape) > 1:
+    if len(audiodata.shape) > 1:
         # If stereo, convert to mono by averaging channels
-        audio_data = np.mean(audio_data, axis=1)
+        audiodata = np.mean(audiodata, axis=1)
 
-    audio_data = np.astype(audio_data, np.float16)
+    audiodata = np.astype(audiodata, np.float16)
 
     # Now you have:
-    # - audio_data: a 1D numpy array containing the audio samples
+    # - audiodata: a 1D numpy array containing the audio samples
     # - sr: the sample rate of the audio
 
     # Your analysis code goes here
     # For example, you could print basic information:
-    print(f"Audio length: {len(audio_data) / sr:.2f} seconds")
+    print(f"Audio length: {len(audiodata) / sr:.2f} seconds")
     print(f"Sample rate: {sr} Hz")
 
-    zcr = librosa.feature.zero_crossing_rate(audio_data)[0]
+    zcr = librosa.feature.zero_crossing_rate(audiodata)[0]
    print(f"Mean Zero Crossing Rate: {np.mean(zcr):.4f}")
 
     # Calculate RMS Energy
-    rms = librosa.feature.rms(y=audio_data)[0]
+    rms = librosa.feature.rms(y=audiodata)[0]
     print(f"Mean RMS Energy: {np.mean(rms):.4f}")
 
-    # Return your analysis results
+    tempo, beattimes = getBeats(audiodata, sr)
+    spectogram_wave = plotCombined(audiodata, sr)
 
+    # Return your analysis results
     results = f"""
-    - Audio length: {len(audio_data) / sr:.2f} seconds
+    - Audio length: {len(audiodata) / sr:.2f} seconds
     - Sample rate: {sr} Hz
     - Mean Zero Crossing Rate: {np.mean(zcr):.4f}
     - Mean RMS Energy: {np.mean(rms):.4f}
+    - Tempo: {tempo}
+    - Beats: {beattimes}
     """
-    return results
+    return results, spectogram_wave
 
 
 
 with gr.Blocks() as app:
 
-    gr.Markdown("🚨 This Project is still in works")
+    gr.Markdown("# 🚨 This Project is still in works")
 
     gr.Markdown("# Heartbeat")
     gr.Markdown("This App helps to analyze and extract Information from Heartbeats")
@@ -70,8 +122,9 @@ with gr.Blocks() as app:
     analyzebtn = gr.Button("analyze")
 
     results = gr.Markdown()
+    spectogram_wave = gr.Plot()
 
-    analyzebtn.click(analyze, audiofile, results)
+    analyzebtn.click(analyze, audiofile, [results, spectogram_wave])
 
 
 
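
For context, here is a minimal standalone sketch (not part of the commit) of the processing steps the updated analyze() delegates to getBeats() and plotCombined(): beat tracking from an onset-strength envelope, plus a combined waveform/spectrogram figure. The input file heartbeat.wav is a hypothetical example path, and librosa.load is used here in place of the (sr, data) tuple that the gr.Audio component passes to analyze().

# Standalone sketch, assuming a local file "heartbeat.wav" (hypothetical path)
# and that librosa and plotly are installed; it mirrors the steps this commit adds.
import numpy as np
import librosa
import plotly.graph_objects as go
from plotly.subplots import make_subplots

# librosa.load returns mono float samples in [-1, 1] and the sample rate
audiodata, sr = librosa.load("heartbeat.wav", sr=None, mono=True)

# Beat tracking, as in getBeats()
onset_env = librosa.onset.onset_strength(y=audiodata, sr=sr)
tempo, beats = librosa.beat.beat_track(onset_envelope=onset_env, sr=sr)
beattimes = librosa.frames_to_time(beats, sr=sr)
print(f"Estimated tempo: {np.atleast_1d(tempo)[0]:.1f} BPM, {len(beattimes)} beats")

# Combined waveform + spectrogram figure, as in plotCombined()
fig = make_subplots(rows=2, cols=1, shared_xaxes=True,
                    subplot_titles=("Audio Waveform", "Spectrogram"))
time = np.arange(len(audiodata)) / sr
fig.add_trace(go.Scatter(x=time, y=audiodata, mode="lines", name="Waveform"),
              row=1, col=1)
S_db = librosa.amplitude_to_db(np.abs(librosa.stft(audiodata)), ref=np.max)
fig.add_trace(go.Heatmap(z=S_db, x=librosa.times_like(S_db, sr=sr),
                         y=librosa.fft_frequencies(sr=sr), colorscale="Viridis"),
              row=2, col=1)
fig.show()

Note that librosa.load already yields floating-point samples, whereas gr.Audio typically hands analyze() integer PCM, which is why the committed code casts the array before analysis.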