awacke1 committed on
Commit
37112c3
•
1 Parent(s): d1c1759

Create backup13.SpechIO.app.py

Browse files
Files changed (1)
  1. backup13.SpechIO.app.py +351 -0
backup13.SpechIO.app.py ADDED
@@ -0,0 +1,351 @@
import streamlit as st
import anthropic, openai, base64, cv2, glob, json, math, os, pytz, random, re, requests, textract, time, zipfile
import plotly.graph_objects as go
import streamlit.components.v1 as components
from datetime import datetime
from audio_recorder_streamlit import audio_recorder
from bs4 import BeautifulSoup
from collections import deque
from dotenv import load_dotenv
from gradio_client import Client
from huggingface_hub import InferenceClient
from io import BytesIO
from PIL import Image
from PyPDF2 import PdfReader
from urllib.parse import quote
from xml.etree import ElementTree as ET
from openai import OpenAI
import extra_streamlit_components as stx
from streamlit.runtime.scriptrunner import get_script_run_ctx

# 🔧 Config & Setup
st.set_page_config(
    page_title="🚲BikeAI🏆 Claude/GPT Research",
    page_icon="🚲🏆",
    layout="wide",
    initial_sidebar_state="auto",
    menu_items={
        'Get Help': 'https://huggingface.co/awacke1',
        'Report a bug': 'https://huggingface.co/spaces/awacke1',
        'About': "🚲BikeAI🏆 Claude/GPT Research AI"
    }
)
load_dotenv()
openai.api_key = os.getenv('OPENAI_API_KEY') or st.secrets['OPENAI_API_KEY']
anthropic_key = os.getenv("ANTHROPIC_API_KEY_3") or st.secrets["ANTHROPIC_API_KEY"]
claude_client = anthropic.Anthropic(api_key=anthropic_key)
openai_client = OpenAI(api_key=openai.api_key, organization=os.getenv('OPENAI_ORG_ID'))
HF_KEY = os.getenv('HF_KEY')
API_URL = os.getenv('API_URL')
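
# The setup above expects these credentials in the environment or in Streamlit
# secrets: OPENAI_API_KEY (optionally OPENAI_ORG_ID), ANTHROPIC_API_KEY_3 or
# ANTHROPIC_API_KEY, plus optional HF_KEY / API_URL for Hugging Face inference.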

st.session_state.setdefault('transcript_history', [])
st.session_state.setdefault('chat_history', [])
st.session_state.setdefault('openai_model', "gpt-4o-2024-05-13")
st.session_state.setdefault('messages', [])
st.session_state.setdefault('last_voice_input', "")

# 🎨 Minimal Custom CSS
st.markdown("""
<style>
.main { background: linear-gradient(to right, #1a1a1a, #2d2d2d); color: #fff; }
.stMarkdown { font-family: 'Helvetica Neue', sans-serif; }
</style>
""", unsafe_allow_html=True)

# 🔑 Common Utilities
def generate_filename(prompt, file_type="md"):
    ctz = pytz.timezone('US/Central')
    date_str = datetime.now(ctz).strftime("%m%d_%H%M")
    safe = re.sub(r'[<>:"/\\|?*\n]', ' ', prompt)  # strip filesystem-unsafe characters
    safe = re.sub(r'\s+', ' ', safe).strip()[:90]
    return f"{date_str}_{safe}.{file_type}"
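
# Example (hypothetical input): generate_filename("What is RAG?") on June 12 at
# 14:30 Central would yield "0612_1430_What is RAG.md".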

def create_file(filename, prompt, response):
    with open(filename, 'w', encoding='utf-8') as f:
        f.write(prompt + "\n\n" + response)

def get_download_link(file):
    with open(file, "rb") as f:
        b64 = base64.b64encode(f.read()).decode()
    return f'<a href="data:file/txt;base64,{b64}" download="{os.path.basename(file)}">📂 Download {os.path.basename(file)}</a>'

@st.cache_resource
def speech_synthesis_html(result):
    # Strip double quotes so the inline JS string stays valid; newlines or
    # backslashes in `result` could still break the script.
    html_code = f"""
    <html><body>
    <script>
    var msg = new SpeechSynthesisUtterance("{result.replace('"', '')}");
    window.speechSynthesis.speak(msg);
    </script>
    </body></html>
    """
    components.html(html_code, height=0)
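
# A more robust variant (a sketch, not what the app uses) would JSON-encode the
# text so quotes and newlines cannot escape the script string:
#   msg_js = json.dumps(result)
#   html_code = f"<script>var msg = new SpeechSynthesisUtterance({msg_js}); window.speechSynthesis.speak(msg);</script>"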

def process_image(image_path, user_prompt):
    with open(image_path, "rb") as imgf:
        image_data = imgf.read()
    b64img = base64.b64encode(image_data).decode("utf-8")
    resp = openai_client.chat.completions.create(
        model=st.session_state["openai_model"],
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": [
                {"type": "text", "text": user_prompt},
                {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{b64img}"}}
            ]}
        ],
        temperature=0.0,
    )
    return resp.choices[0].message.content

def process_audio(audio_path):
    with open(audio_path, "rb") as f:
        transcription = openai_client.audio.transcriptions.create(model="whisper-1", file=f)
    st.session_state.messages.append({"role": "user", "content": transcription.text})
    return transcription.text

def process_video(video_path, seconds_per_frame=1):
    vid = cv2.VideoCapture(video_path)
    total = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = vid.get(cv2.CAP_PROP_FPS)
    skip = max(1, int(fps * seconds_per_frame))  # guard: step must be >= 1 even if FPS metadata is missing
    frames_b64 = []
    for i in range(0, total, skip):
        vid.set(cv2.CAP_PROP_POS_FRAMES, i)
        ret, frame = vid.read()
        if not ret:
            break
        _, buf = cv2.imencode(".jpg", frame)
        frames_b64.append(base64.b64encode(buf).decode("utf-8"))
    vid.release()
    return frames_b64
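
# Sampling math: at 30 fps with seconds_per_frame=1, skip = 30, so a 10-second
# clip yields roughly 10 base64-encoded JPEG frames for the vision model.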

def process_video_with_gpt(video_path, prompt):
    frames = process_video(video_path)
    resp = openai_client.chat.completions.create(
        model=st.session_state["openai_model"],
        messages=[
            {"role": "system", "content": "Analyze video frames."},
            {"role": "user", "content": [
                {"type": "text", "text": prompt},
                *[{"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{fr}"}} for fr in frames]
            ]}
        ]
    )
    return resp.choices[0].message.content

def search_arxiv(query):
    st.write("🔍 Searching ArXiv...")
    client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
    r1 = client.predict(prompt=query, llm_model_picked="mistralai/Mixtral-8x7B-Instruct-v0.1", stream_outputs=True, api_name="/ask_llm")
    st.markdown("### Mixtral-8x7B-Instruct-v0.1 Result")
    st.markdown(r1)
    r2 = client.predict(prompt=query, llm_model_picked="mistralai/Mistral-7B-Instruct-v0.2", stream_outputs=True, api_name="/ask_llm")
    st.markdown("### Mistral-7B-Instruct-v0.2 Result")
    st.markdown(r2)
    return f"{r1}\n\n{r2}"

def perform_ai_lookup(q):
    start = time.time()
    client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
    r = client.predict(q, 20, "Semantic Search", "mistralai/Mixtral-8x7B-Instruct-v0.1", api_name="/update_with_rag_md")
    refs = r[0]
    r2 = client.predict(q, "mistralai/Mixtral-8x7B-Instruct-v0.1", True, api_name="/ask_llm")
    result = f"### 🔎 {q}\n\n{r2}\n\n{refs}"
    speech_synthesis_html(r2)
    st.markdown(result)
    elapsed = time.time() - start
    st.write(f"Elapsed: {elapsed:.2f} s")
    fn = generate_filename(q, "md")
    create_file(fn, q, result)
    return result
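
# Example flow (hypothetical query): perform_ai_lookup("mixture of experts")
# renders the RAG answer with references, speaks the answer via the browser,
# and saves a timestamped markdown transcript next to the app.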

def process_with_gpt(text):
    if not text:
        return
    st.session_state.messages.append({"role": "user", "content": text})
    with st.chat_message("user"):
        st.markdown(text)
    with st.chat_message("assistant"):
        c = openai_client.chat.completions.create(
            model=st.session_state["openai_model"],
            messages=st.session_state.messages,
            stream=False
        )
        ans = c.choices[0].message.content
        st.write("GPT-4o: " + ans)
        create_file(generate_filename(text, "md"), text, ans)
        st.session_state.messages.append({"role": "assistant", "content": ans})
    return ans

def process_with_claude(text):
    if not text:
        return
    with st.chat_message("user"):
        st.markdown(text)
    with st.chat_message("assistant"):
        r = claude_client.messages.create(
            model="claude-3-sonnet-20240229",
            max_tokens=1000,
            messages=[{"role": "user", "content": text}]
        )
        ans = r.content[0].text
        st.write("Claude: " + ans)
        create_file(generate_filename(text, "md"), text, ans)
        st.session_state.chat_history.append({"user": text, "claude": ans})
    return ans
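
# Note: GPT turns accumulate in st.session_state.messages (the full context is
# re-sent on each call), while Claude exchanges are kept separately in
# st.session_state.chat_history; the history tabs in main() read from both.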

def create_zip_of_files(files):
    zip_name = "all_files.zip"
    with zipfile.ZipFile(zip_name, 'w') as z:
        for f in files:
            z.write(f)
    return zip_name

def get_media_html(p, typ="video", w="100%"):
    with open(p, 'rb') as fh:  # context manager so the handle is closed promptly
        d = base64.b64encode(fh.read()).decode()
    if typ == "video":
        return f'<video width="{w}" controls autoplay muted loop><source src="data:video/mp4;base64,{d}" type="video/mp4"></video>'
    else:
        return f'<audio controls style="width:{w};"><source src="data:audio/mpeg;base64,{d}" type="audio/mpeg"></audio>'

def create_media_gallery():
    st.header("🎬 Media Gallery")
    tabs = st.tabs(["🖼️ Images", "🎵 Audio", "🎥 Video"])
    with tabs[0]:
        imgs = glob.glob("*.png") + glob.glob("*.jpg")
        if imgs:
            c = st.slider("Cols", 1, 5, 3)
            cols = st.columns(c)
            for i, f in enumerate(imgs):
                with cols[i % c]:
                    st.image(Image.open(f), use_container_width=True)
                    if st.button(f"👀 Analyze {os.path.basename(f)}"):
                        a = process_image(f, "Describe this image.")
                        st.markdown(a)
    with tabs[1]:
        auds = glob.glob("*.mp3") + glob.glob("*.wav")
        for a in auds:
            with st.expander(f"🎵 {os.path.basename(a)}"):
                st.markdown(get_media_html(a, "audio"), unsafe_allow_html=True)
                if st.button(f"Transcribe {os.path.basename(a)}"):
                    t = process_audio(a)
                    st.write(t)
    with tabs[2]:
        vids = glob.glob("*.mp4")
        for v in vids:
            with st.expander(f"🎥 {os.path.basename(v)}"):
                st.markdown(get_media_html(v, "video"), unsafe_allow_html=True)
                if st.button(f"Analyze {os.path.basename(v)}"):
                    a = process_video_with_gpt(v, "Describe video.")
                    st.markdown(a)

def display_file_manager():
    st.sidebar.title("📁 File Management")
    files = sorted(glob.glob("*.md"), reverse=True)
    if st.sidebar.button("🗑 Delete All"):
        for f in files:
            os.remove(f)
        st.rerun()  # st.experimental_rerun() was removed in recent Streamlit releases
    if st.sidebar.button("⬇️ Download All"):
        z = create_zip_of_files(files)
        st.sidebar.markdown(get_download_link(z), unsafe_allow_html=True)
    for f in files:
        col1, col2, col3, col4 = st.sidebar.columns([1, 3, 1, 1])
        with col1:
            if st.button("🌐", key="v" + f):
                st.session_state.current_file = f
                with open(f, 'r', encoding='utf-8') as fh:
                    st.write(fh.read())
        with col2:
            st.markdown(get_download_link(f), unsafe_allow_html=True)
        with col3:
            if st.button("📂", key="e" + f):
                st.session_state.current_file = f
                with open(f, 'r', encoding='utf-8') as fh:
                    st.session_state.file_content = fh.read()
        with col4:
            if st.button("🗑", key="d" + f):
                os.remove(f)
                st.rerun()
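
# Per-file sidebar row: 🌐 view inline · markdown download link · 📂 load into
# the File Editor tab · 🗑 delete (then rerun to refresh the list).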

def main():
    st.sidebar.markdown("### 🚲BikeAI🏆 Multi-Agent Research AI")
    tab_main = st.radio("Action:", ["🎤 Voice Input", "📸 Media Gallery", "🔍 Search ArXiv", "📝 File Editor"], horizontal=True)
    model_choice = st.sidebar.radio("AI Model:", ["GPT+Claude+Arxiv", "GPT-4o", "Claude-3"])

    # Speech-to-text component placeholder (expects a custom component bundle in ./mycomponent)
    mycomponent = components.declare_component("mycomponent", path="mycomponent")
    val = mycomponent(my_input_value="Hello")
    if val:
        user_input = val
        if model_choice == "GPT-4o":
            process_with_gpt(user_input)
        elif model_choice == "Claude-3":
            process_with_claude(user_input)
        else:
            col1, col2, col3 = st.columns(3)
            with col1:
                st.subheader("GPT-4o Omni:")
                try:
                    process_with_gpt(user_input)
                except Exception:
                    st.write('GPT-4o error')
            with col2:
                st.subheader("Claude-3 Sonnet:")
                try:
                    process_with_claude(user_input)
                except Exception:
                    st.write('Claude error')
            with col3:
                st.subheader("Arxiv + Mistral:")
                try:
                    r = perform_ai_lookup(user_input)
                    st.markdown(r)
                except Exception:
                    st.write("Arxiv error")

    if tab_main == "🎤 Voice Input":
        st.subheader("🎤 Voice Recognition")
        user_text = st.text_area("Message:", height=100)
        if st.button("Send 📨"):
            if user_text:
                if model_choice == "GPT-4o":
                    process_with_gpt(user_text)
                elif model_choice == "Claude-3":
                    process_with_claude(user_text)
                else:
                    col1, col2, col3 = st.columns(3)
                    with col1:
                        st.subheader("GPT-4o Omni:")
                        process_with_gpt(user_text)
                    with col2:
                        st.subheader("Claude-3 Sonnet:")
                        process_with_claude(user_text)
                    with col3:
                        st.subheader("Arxiv & Mistral:")
                        res = perform_ai_lookup(user_text)
                        st.markdown(res)
        st.subheader("📜 Chat History")
        t1, t2 = st.tabs(["Claude History", "GPT-4o History"])
        with t1:
            for c in st.session_state.chat_history:
                st.write("**You:**", c["user"])
                st.write("**Claude:**", c["claude"])
        with t2:
            for m in st.session_state.messages:
                with st.chat_message(m["role"]):
                    st.markdown(m["content"])

    elif tab_main == "📸 Media Gallery":
        create_media_gallery()

    elif tab_main == "🔍 Search ArXiv":
        q = st.text_input("Research query:")
        if q:
            r = search_arxiv(q)
            st.markdown(r)

    elif tab_main == "📝 File Editor":
        if getattr(st.session_state, 'current_file', None):
            st.subheader(f"Editing: {st.session_state.current_file}")
            # .get() avoids an AttributeError when a file was opened for viewing
            # (🌐) without loading its content for editing (📂)
            new_text = st.text_area("Content:", st.session_state.get('file_content', ""), height=300)
            if st.button("Save"):
                with open(st.session_state.current_file, 'w', encoding='utf-8') as f:
                    f.write(new_text)
                st.success("Updated!")

    display_file_manager()

if __name__ == "__main__":
    main()
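
# Launched the usual Streamlit way, e.g.: streamlit run backup13.SpechIO.app.py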