Update app.py
app.py CHANGED
@@ -1,4 +1,4 @@
-import gradio as gr
+import streamlit as st
 import edge_tts
 import asyncio
 import tempfile
@@ -11,7 +11,7 @@ import random
 
 default_lang = "en"
 
-engines = {
+engines = {default_lang: Model(default_lang)}
 
 def transcribe(audio):
     lang = "en"
@@ -30,7 +30,7 @@ def client_fn(model):
         return InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")
     elif "Phi" in model:
         return InferenceClient("microsoft/Phi-3-mini-4k-instruct")
-    else:
+    else:
         return InferenceClient("microsoft/Phi-3-mini-4k-instruct")
 
 def randomize_seed_fn(seed: int) -> int:
@@ -48,15 +48,15 @@ Respond in a normal, conversational manner while being friendly and helpful.
 def models(text, model="Mixtral 8x7B", seed=42):
 
     seed = int(randomize_seed_fn(seed))
-    generator = torch.Generator().manual_seed(seed)
-
+    generator = torch.Generator().manual_seed(seed)
+
     client = client_fn(model)
-
+
     generate_kwargs = dict(
         max_new_tokens=300,
         seed=seed
     )
-
+
     formatted_prompt = system_instructions1 + text + "[JARVIS]"
     stream = client.text_generation(
         formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
@@ -74,69 +74,34 @@ async def respond(audio, model, seed):
     with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_file:
         tmp_path = tmp_file.name
         await communicate.save(tmp_path)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-gr.Interface(
-    batch=True,
-    max_batch_size=10,
-    fn=respond,
-    inputs=[input, select, seed],
-    outputs=[output], live=True)
-
-with gr.Row():
-    select = gr.Dropdown([ 'Mixtral 8x7B',
-                           'Llama 3 8B',
-                           'Mistral 7B v0.3',
-                           'Phi 3 mini',
-                         ],
-                         value="Mistral 7B v0.3",
-                         label="Model"
-                         )
-    seed = gr.Slider(
-        label="Seed",
-        minimum=0,
-        maximum=999999,
-        step=1,
-        value=0,
-        visible=False
-    )
-    input = gr.Textbox(label="User")
-    output = gr.Textbox(label="AI", interactive=False)
-gr.Interface(
-    batch=True,
-    max_batch_size=10,
-    fn=models,
-    inputs=[input, select, seed],
-    outputs=[output], live=True)
-
-if __name__ == "__main__":
-    demo.queue(max_size=200).launch()
+    return tmp_path
+
+st.title("JARVIS⚡")
+st.markdown("### A personal Assistant of Tony Stark for YOU")
+st.markdown("### Voice Chat with your personal Assistant")
+
+with st.form("voice_form"):
+    model_choice = st.selectbox("Choose a model", ['Mixtral 8x7B', 'Llama 3 8B', 'Mistral 7B v0.3', 'Phi 3 mini'], key="voice_model")
+    audio_file = st.file_uploader("Upload Audio", type=["wav", "mp3"], key="voice_audio")
+    submit_button = st.form_submit_button("Submit")
+
+if submit_button:
+    if audio_file is not None:
+        with st.spinner("Transcribing and generating response..."):
+            audio_bytes = audio_file.read()
+            with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_file:
+                tmp_file.write(audio_bytes)
+                tmp_path = tmp_file.name
+            response = respond(tmp_path, model_choice, 42)
+            st.audio(response, format='audio/wav')
+
+with st.form("text_form"):
+    model_choice = st.selectbox("Choose a model", ['Mixtral 8x7B', 'Llama 3 8B', 'Mistral 7B v0.3', 'Phi 3 mini'], key="text_model")
+    user_text = st.text_area("Enter your message:", key="text_input")
+    submit_button = st.form_submit_button("Submit")
+
+if submit_button:
+    if user_text:
+        with st.spinner("Generating response..."):
+            response = models(user_text, model_choice, 42)
+            st.text_area("JARVIS Response", value=response, key="text_output", height=150)
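The models() hunk likewise cuts off right after the client.text_generation(...) call, so the code that consumes the stream is not visible. With huggingface_hub's InferenceClient, stream=True and details=True yield token events; a typical continuation (a sketch, not the committed code) would be:

    # Accumulate streamed tokens into the final reply, skipping
    # special tokens such as EOS.
    output = ""
    for chunk in stream:
        if not chunk.token.special:
            output += chunk.token.text
    return output

Separately, the re-added generator = torch.Generator().manual_seed(seed) is never used on any visible line; the seed that actually reaches the model is the one passed through generate_kwargs.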
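Only the tail of respond() appears in the last hunk (the tempfile block plus the new return tmp_path). Assuming it chains transcribe() and models() and synthesizes the reply with edge_tts, which the surrounding code suggests but does not confirm, its overall shape would be roughly:

    # Hypothetical reconstruction of respond(); only the final four lines
    # are confirmed by the diff.
    async def respond(audio, model, seed):
        user_input = transcribe(audio)             # assumption
        reply = models(user_input, model, seed)    # assumption
        communicate = edge_tts.Communicate(reply)  # default voice; assumption
        with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_file:
            tmp_path = tmp_file.name
            await communicate.save(tmp_path)
        return tmp_path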
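One genuine bug in the new Streamlit path: respond() is declared async def (it awaits communicate.save), but the voice form calls it synchronously, so st.audio() would receive a coroutine object rather than a file path. A minimal bridge, assuming respond() keeps the signature shown in the diff:

    import asyncio

    def respond_sync(audio_path: str, model: str, seed: int) -> str:
        # Drive the async respond() to completion from Streamlit's
        # synchronous script context and return the saved wav path.
        return asyncio.run(respond(audio_path, model, seed))

The form handler would then call response = respond_sync(tmp_path, model_choice, 42) before st.audio(response, format='audio/wav').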