Update app.py
app.py
CHANGED
@@ -395,6 +395,66 @@ def get_zip_download_link(zip_file):
     href = f'<a href="data:application/zip;base64,{b64}" download="{zip_file}">Download All</a>'
     return href
 
+def whisper(filename):
+    with open(filename, "rb") as f:
+        data = f.read
+    #try:
+    response = requests.post(WHISPER_API_URL, headers=WHISPER_headers, data=data)
+    #except:
+    #    st.write('Whisper Voice Speech to Text Model is asleep. Starting up now on T4 - please give 3 minutes then retry as KEDA scales up from zero to activate running container(s).')
+    return response.json()
+
+def whisper_generate_filename(prompt, file_type):
+    central = pytz.timezone('US/Central')
+    safe_date_time = datetime.now(central).strftime("%m%d_%H%M")
+    replaced_prompt = prompt.replace(" ", "_").replace("\n", "_")
+    safe_prompt = "".join(x for x in replaced_prompt if x.isalnum() or x == "_")[:90]
+    return f"{safe_date_time}_{safe_prompt}.{file_type}"
+
+def whisper_save_and_play_audio(audio_recorder):
+    audio_bytes = audio_recorder()
+    if audio_bytes:
+        filename = whisper_generate_filename("Recording", "wav")
+        with open(filename, 'wb') as f:
+            f.write(audio_bytes)
+        st.audio(audio_bytes, format="audio/wav")
+        return filename
+
+def whisper_transcribe_audio(filename):
+    output = whisper(filename)
+    return output
+
+def whisper_save_transcription(transcription):
+    with open(file_path, 'a') as f:
+        f.write(f"{transcription}\n")
+
+def whisper_load_previous_transcriptions():
+    if os.path.exists(file_path):
+        with open(file_path, 'r') as f:
+            return f.read()
+    return ""
+
+def whisper_main():
+    st.title("Speech to Text 🎤📝")
+    st.write("Record your speech and get the text. 🗨️")
+
+    previous_transcriptions = whisper_load_previous_transcriptions()
+    text_area = st.text_area("Transcriptions:", previous_transcriptions, height=400)
+
+    filename = whisper_save_and_play_audio(audio_recorder)
+    if filename is not None:
+        try:
+            transcription = whisper_transcribe_audio(filename)
+
+            # Update the text area with new transcription
+            updated_transcriptions = f"{previous_transcriptions}\n{transcription}"
+            st.text_area("Transcriptions:", updated_transcriptions, height=400)
+
+            # Save the new transcription to file
+            whisper_save_transcription(transcription)
+        except:
+            st.write('Whisperer loading..')
+
 def main():
 
     st.title("DromeLlama7B")
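Note on the new whisper() helper above: data = f.read binds the method object instead of reading the file, so the POST body would not carry the audio bytes. A minimal corrected sketch, assuming WHISPER_API_URL and WHISPER_headers are module-level values defined earlier in app.py (they are not part of this diff) and that the endpoint accepts raw audio bytes as the request body:

def whisper(filename):
    # Read the recorded audio file as raw bytes; f.read() must be called,
    # not referenced, or requests will not send the audio payload.
    with open(filename, "rb") as f:
        data = f.read()
    # WHISPER_API_URL and WHISPER_headers are assumed to exist elsewhere in app.py.
    response = requests.post(WHISPER_API_URL, headers=WHISPER_headers, data=data)
    return response.json()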
@@ -410,7 +470,6 @@ def main():
     except:
         st.write('DromeLlama is asleep. Starting up now on A10 - please give 5 minutes then retry as KEDA scales up from zero to activate running container(s).')
 
-
     openai.api_key = os.getenv('OPENAI_KEY')
     menu = ["txt", "htm", "xlsx", "csv", "md", "py"]
     choice = st.sidebar.selectbox("Output File Type:", menu)
@@ -511,7 +570,6 @@ def main():
         create_file(filename, user_prompt, response, should_save)
         st.experimental_rerun()
 
-
     # Feedback
     # Step: Give User a Way to Upvote or Downvote
     feedback = st.radio("Step 8: Give your feedback", ("👍 Upvote", "👎 Downvote"))
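The create_file and generate_filename helpers used in this hunk are defined earlier in app.py and are not touched by this commit. A hypothetical sketch of the save step they implement, assuming should_save simply gates whether the file is written:

def create_file(filename, prompt, response, should_save=True):
    # Hypothetical sketch only; the real helper lives earlier in app.py and may
    # format htm/md/py output differently.
    if not should_save:
        return
    with open(filename, 'w', encoding='utf-8') as f:
        f.write(prompt + '\n\n' + response)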
@@ -520,89 +578,28 @@ def main():
     else:
         st.write("You downvoted 👎. Thank you for your feedback!")
 
-
-
-    st.
-    st.
-    user_question
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    create_file(filename, raw, '', should_save)
+    load_dotenv()
+    st.write(css, unsafe_allow_html=True)
+    st.header("Chat with documents :books:")
+    user_question = st.text_input("Ask a question about your documents:")
+    if user_question:
+        process_user_input(user_question)
+    with st.sidebar:
+        st.subheader("Your documents")
+        docs = st.file_uploader("import documents", accept_multiple_files=True)
+        with st.spinner("Processing"):
+            raw = pdf2txt(docs)
+            if len(raw) > 0:
+                length = str(len(raw))
+                text_chunks = txt2chunks(raw)
+                vectorstore = vector_store(text_chunks)
+                st.session_state.conversation = get_chain(vectorstore)
+                st.markdown('# AI Search Index of Length:' + length + ' Created.') # add timing
+                filename = generate_filename(raw, 'txt')
+                create_file(filename, raw, '', should_save)
 
 if __name__ == "__main__":
+    whisper_main()
     main()
 
 
-
-def whisper(filename):
-    with open(filename, "rb") as f:
-        data = f.read
-    #try:
-    response = requests.post(WHISPER_API_URL, headers=WHISPER_headers, data=data)
-    #except:
-    #    st.write('Whisper Voice Speech to Text Model is asleep. Starting up now on T4 - please give 3 minutes then retry as KEDA scales up from zero to activate running container(s).')
-    return response.json()
-
-def whisper_generate_filename(prompt, file_type):
-    central = pytz.timezone('US/Central')
-    safe_date_time = datetime.now(central).strftime("%m%d_%H%M")
-    replaced_prompt = prompt.replace(" ", "_").replace("\n", "_")
-    safe_prompt = "".join(x for x in replaced_prompt if x.isalnum() or x == "_")[:90]
-    return f"{safe_date_time}_{safe_prompt}.{file_type}"
-
-def whisper_save_and_play_audio(audio_recorder):
-    audio_bytes = audio_recorder()
-    if audio_bytes:
-        filename = whisper_generate_filename("Recording", "wav")
-        with open(filename, 'wb') as f:
-            f.write(audio_bytes)
-        st.audio(audio_bytes, format="audio/wav")
-        return filename
-
-def whisper_transcribe_audio(filename):
-    output = whisper(filename)
-    return output
-
-def whisper_save_transcription(transcription):
-    with open(file_path, 'a') as f:
-        f.write(f"{transcription}\n")
-
-def whisper_load_previous_transcriptions():
-    if os.path.exists(file_path):
-        with open(file_path, 'r') as f:
-            return f.read()
-    return ""
-
-def whisper_main():
-    st.title("Speech to Text 🎤📝")
-    st.write("Record your speech and get the text. 🗨️")
-
-    previous_transcriptions = whisper_load_previous_transcriptions()
-    text_area = st.text_area("Transcriptions:", previous_transcriptions, height=400)
-
-    filename = whisper_save_and_play_audio(audio_recorder)
-    if filename is not None:
-        try:
-            transcription = whisper_transcribe_audio(filename)
-
-            # Update the text area with new transcription
-            updated_transcriptions = f"{previous_transcriptions}\n{transcription}"
-            st.text_area("Transcriptions:", updated_transcriptions, height=400)
-
-            # Save the new transcription to file
-            whisper_save_transcription(transcription)
-        except:
-            st.write('Whisperer loading..')
-
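The chat-with-documents block added at the end of main() relies on helpers (pdf2txt, txt2chunks, vector_store, get_chain, process_user_input, css) that are defined elsewhere in app.py and are not part of this commit. As orientation only, a minimal sketch of what such helpers typically look like in a LangChain + FAISS setup (an assumption; the actual implementations may differ):

from PyPDF2 import PdfReader
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain

def pdf2txt(docs):
    # Concatenate the text of every page of every uploaded PDF.
    text = ""
    for pdf in docs:
        for page in PdfReader(pdf).pages:
            text += page.extract_text() or ""
    return text

def txt2chunks(text):
    # Split the raw text into overlapping chunks suitable for embedding.
    splitter = CharacterTextSplitter(separator="\n", chunk_size=1000,
                                     chunk_overlap=200, length_function=len)
    return splitter.split_text(text)

def vector_store(text_chunks):
    # Embed the chunks and index them in an in-memory FAISS store.
    return FAISS.from_texts(texts=text_chunks, embedding=OpenAIEmbeddings())

def get_chain(vectorstore):
    # Conversational retrieval chain with chat history kept in memory.
    memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
    return ConversationalRetrievalChain.from_llm(
        llm=ChatOpenAI(), retriever=vectorstore.as_retriever(), memory=memory)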