son9john committed
Commit · b8d8517
Parent(s): Duplicate from son9john/US
Browse files
- .gitattributes +34 -0
- README.md +13 -0
- app.py +142 -0
- packages.txt +1 -0
- requirements.txt +9 -0
.gitattributes
ADDED
@@ -0,0 +1,34 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
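These are the stock Hugging Face LFS rules: any file matching one of the globs above is stored via Git LFS rather than in the regular object database. As a quick sanity check, a minimal sketch like the following (an illustration only, assuming git is on PATH and the script runs inside the checked-out repo; the function name and the sample path are ours) asks git which filter a given path receives:

import subprocess

def lfs_filter_for(path: str) -> str:
    # `git check-attr filter -- <path>` prints "<path>: filter: <value>";
    # a value of "lfs" means the path matches one of the rules above.
    out = subprocess.run(
        ["git", "check-attr", "filter", "--", path],
        capture_output=True, text=True, check=True,
    ).stdout.strip()
    return out.rsplit(": ", 1)[-1]

print(lfs_filter_for("model.safetensors"))  # expected: lfs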
README.md
ADDED
@@ -0,0 +1,13 @@
+---
+title: US
+emoji: 🐨
+colorFrom: red
+colorTo: gray
+sdk: gradio
+sdk_version: 3.19.1
+app_file: app.py
+pinned: false
+duplicated_from: son9john/US
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,142 @@
+import openai
+import gradio as gr
+from gradio.components import Audio, Textbox
+import os
+import re
+from transformers import GPT2Tokenizer
+import pandas as pd
+from datetime import datetime, timezone, timedelta
+import notion_df
+
+# Tokenizer used for counting and truncating input tokens
+tokenizer = GPT2Tokenizer.from_pretrained('gpt2-medium')
+openai.api_key = os.environ["OPENAI_API_KEY"]
+
+# Define the initial system message and the running messages list
+initial_message = {"role": "system", "content": 'You are a USMLE tutor. ALWAYS respond to all input with layered "bullet points" (lists rather than sentences) plus a fun mnemonic to memorize each list. You may answer with up to 1200 words if the user requests a longer response.'}
+messages = [initial_message]
+
+# Counts answers since the last history reset
+answer_count = 0
+
+# Notion integration token
+API_KEY = os.environ["API_KEY"]
+
+def transcribe(audio, text):
+    global messages
+    global answer_count
+
+    # Transcribe the audio if provided
+    if audio is not None:
+        with open(audio, "rb") as audio_file:
+            transcript = openai.Audio.transcribe("whisper-1", audio_file, language="en")
+        messages.append({"role": "user", "content": transcript["text"]})
+
+    # Tokenize the text input, keeping whole sentences within a 1440-token budget
+    if text is not None:
+        sentences = re.split("(?<=[.!?]) +", text)
+        input_tokens = []
+        for sentence in sentences:
+            sentence_tokens = tokenizer.encode(sentence)
+            if len(input_tokens) + len(sentence_tokens) < 1440:
+                input_tokens.extend(sentence_tokens)
+            else:
+                # Truncate the sentence that would overflow the budget and stop
+                sentence_tokens = sentence_tokens[:1440 - len(input_tokens)]
+                input_tokens.extend(sentence_tokens)
+                break
+
+        # Decode the kept tokens back into text and store them as a user message
+        input_text = tokenizer.decode(input_tokens)
+        messages.append({"role": "user", "content": input_text})
+
+    # If the accumulated history exceeds 2096 tokens, archive it to Notion and reset
+    num_tokens = sum(len(tokenizer.encode(message["content"])) for message in messages)
+    if num_tokens > 2096:
+        # Concatenate the chat history (system prompt excluded)
+        chat_transcript = "\n\n".join([f"[ANSWER {answer_count}]{message['role']}: {message['content']}" for message in messages if message['role'] != 'system'])
+
+        # Append the number of tokens used to the end of the chat transcript
+        chat_transcript += f"\n\nNumber of tokens used: {num_tokens}\n\n"
+
+        # Timestamp in Eastern Time (fixed UTC-5 offset; ignores daylight saving)
+        now_et = datetime.now(timezone(timedelta(hours=-5)))
+        published_date = now_et.strftime('%m-%d-%y %H:%M')
+
+        # Upload the full transcript to Notion, then reset the conversation state
+        df = pd.DataFrame([chat_transcript])
+        notion_df.upload(df, 'https://www.notion.so/US-62e861a0b35f43da8ef9a7789512b8c2?pvs=4', title=str(published_date + 'FULL'), api_key=API_KEY)
+        messages = [initial_message]
+        answer_count = 0
+    else:
+        # Increment the answer counter
+        answer_count += 1
+
+    # Generate the assistant reply with the OpenAI API
+    system_message = openai.ChatCompletion.create(
+        model="gpt-3.5-turbo",
+        messages=messages,
+        max_tokens=2000
+    )["choices"][0]["message"]
+
+    # Add the assistant message to the messages list
+    messages.append(system_message)
+
+    # Concatenate the chat history (system prompt excluded)
+    chat_transcript = "\n\n".join([f"[ANSWER {answer_count}]{message['role']}: {message['content']}" for message in messages if message['role'] != 'system'])
+
+    # Append the number of tokens used to the end of the chat transcript
+    chat_transcript += f"\n\nNumber of tokens used: {num_tokens}\n\n"
+
+    # Save the chat transcript to a local file
+    with open("conversation_history.txt", "a") as f:
+        f.write(chat_transcript)
+
+    # Upload the chat transcript to Notion
+    now_et = datetime.now(timezone(timedelta(hours=-5)))
+    published_date = now_et.strftime('%m-%d-%y %H:%M')
+    df = pd.DataFrame([chat_transcript])
+    notion_df.upload(df, 'https://www.notion.so/US-62e861a0b35f43da8ef9a7789512b8c2?pvs=4', title=str(published_date), api_key=API_KEY)
+
+    # Return the chat transcript
+    return chat_transcript
+
+# Define the input and output components for Gradio
+audio_input = Audio(source="microphone", type="filepath", label="Record your message")
+text_input = Textbox(label="Type your message")
+output_text = Textbox(label="Response")
+
+# Define the Gradio interface
+iface = gr.Interface(
+    fn=transcribe,
+    inputs=[audio_input, text_input],
+    outputs=[output_text],
+    title="YENA",
+    description="Talk to Your Nephrology Tutor Yena",
+    theme="compact",
+    layout="vertical",
+    allow_flagging="never"
+)
+
+# Run the Gradio interface
+iface.launch()
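The least obvious piece of transcribe() is the sentence-aware truncation, so here is a self-contained sketch of the same idea; the 1440-token budget and the GPT-2 tokenizer mirror the app, while the function name is ours for illustration:

import re
from transformers import GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained("gpt2-medium")

def truncate_to_budget(text: str, budget: int = 1440) -> str:
    # Keep whole sentences while they fit; hard-truncate the first one that overflows.
    kept = []
    for sentence in re.split(r"(?<=[.!?]) +", text):
        tokens = tokenizer.encode(sentence)
        if len(kept) + len(tokens) < budget:
            kept.extend(tokens)
        else:
            kept.extend(tokens[:budget - len(kept)])
            break
    return tokenizer.decode(kept)

# Anything under the budget round-trips unchanged.
assert truncate_to_budget("Short input.") == "Short input."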
packages.txt
ADDED
@@ -0,0 +1 @@
+ffmpeg
requirements.txt
ADDED
@@ -0,0 +1,9 @@
+torch
+openai
+transformers
+sentencepiece
+pipeline
+nltk
+tiktoken
+openai-whisper
+notion_df
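Note that tiktoken is installed here, yet app.py counts tokens with the GPT-2 tokenizer, while gpt-3.5-turbo actually uses the cl100k_base encoding, so the app's 2096-token threshold is only approximate. A closer per-message count would look like this sketch (the function name is ours):

import tiktoken

def chat_tokens(messages) -> int:
    # Count content tokens with the encoding gpt-3.5-turbo actually uses.
    enc = tiktoken.get_encoding("cl100k_base")
    return sum(len(enc.encode(m["content"])) for m in messages)

print(chat_tokens([{"role": "user", "content": "Hello there."}]))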