Tonic committed
Commit 798b27c · 1 Parent(s): be14c17

Update app.py

Files changed (1): app.py +13 -14
app.py CHANGED
@@ -1,20 +1,17 @@
-
 import streamlit as st
 import sounddevice as sd
 import numpy as np
 import wave
-import gradio as gr
-import numpy as np
-from audiorecorder import audiorecorder
-import whisper
-import os
-import streamlit.components.v1 as components
-import tempfile
-import io
-import requests
-import json
+import whisper
+import os
+import streamlit.components.v1 as components
+import tempfile
+import io
+import requests
+import json
 import openai
 
+# Define a function to split text into chunks
 def chunk_text(text, chunk_size=2000):
     chunks = []
     start = 0
@@ -46,10 +43,12 @@ if record_audio:
         if any(indata):
             audio_frames.append(indata.copy())
 
+    if st.button("Stop Recording"):  # Moved this button here to stop audio recording
+        sd.stop()
+
     with st.spinner("Recording..."):
         with sd.InputStream(callback=audio_callback):
             st.text("Recording audio. Click 'Stop Recording' when finished.")
-            st.button("Stop Recording")
 
     st.success("Recording stopped")
 
@@ -61,7 +60,8 @@ if record_audio:
         wf.setframerate(44100)
         wf.writeframes(audio_data.tobytes())
 
-    if submit_button:
+    # Moved the submit_button check here
+    if 'submit_button' in st.session_state:
         model = whisper.load_model("base")
         audio_data = audio.export().read()
         audio_bytes_io = io.BytesIO(audio_data)
@@ -131,6 +131,5 @@ if userinput and api_key and st.button("Extract Claims", key="claims_extraction"
     # Display generated objectives for all chunks
     learning_status_placeholder.text(f"Patentable Claims Extracted!\n{all_extracted_claims.strip()}")
 
-
 # Citation
 st.markdown("<sub>This app was created by [Taylor Ennen](https://github.com/taylor-ennen/GPT-Streamlit-MVP) & [Tonic](https://huggingface.co/tonic)</sub>", unsafe_allow_html=True)
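
The new comment in the first hunk documents chunk_text, whose body sits mostly outside the diff context. A minimal sketch of a fixed-size chunking loop consistent with the visible signature (illustrative only; the actual body in app.py may differ):

def chunk_text(text, chunk_size=2000):
    # Slice the input into consecutive, non-overlapping pieces of at most
    # chunk_size characters; the last chunk may be shorter.
    chunks = []
    start = 0
    while start < len(text):
        chunks.append(text[start:start + chunk_size])
        start += chunk_size
    return chunks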
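
The third hunk replaces a bare `if submit_button:` with a key lookup in st.session_state. For context, a short sketch of how keyed Streamlit buttons interact with session state (only the key name "submit_button" comes from the diff; the rest is assumed):

import streamlit as st

# A widget created with a key mirrors its value into st.session_state.
st.button("Submit", key="submit_button")

# Presence check, as in the diff: True on any rerun where the keyed widget
# has been rendered, regardless of whether it was clicked.
if "submit_button" in st.session_state:
    st.write("submit_button key exists")

# Value check: True only on the rerun in which the button was clicked.
if st.session_state.get("submit_button"):
    st.write("Submit was clicked")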
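
The same hunk feeds recorded audio to Whisper via io.BytesIO. Note that openai-whisper's transcribe() expects a file path or an audio array rather than a file-like object, so in-memory bytes are usually written to a temporary file first. A hedged sketch of that pattern (the function and variable names are illustrative, not taken from app.py):

import tempfile

import whisper

def transcribe_wav_bytes(wav_bytes):
    """Transcribe in-memory WAV bytes with the 'base' Whisper model."""
    model = whisper.load_model("base")
    # Spill the bytes to a temporary .wav file so transcribe() gets a path.
    with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmp:
        tmp.write(wav_bytes)
        tmp_path = tmp.name
    result = model.transcribe(tmp_path)
    return result["text"]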