Engineering a Better Voice Assist?

#2
by awacke1 - opened

Analyze how this code example creates a voice assistant, then design a better approach using Streamlit, Python, and HTML5/JavaScript inside Streamlit to build a demo program that does the same thing.
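One way to approach this, sketched below, is to keep capture and playback in the browser and only do transcription and reasoning server-side: audio_recorder_streamlit records microphone audio, Whisper transcribes it, GPT-4o answers, and a small HTML5 snippet speaks the reply with the browser's speechSynthesis API. This is a minimal sketch under assumptions, not a finished app; it assumes the audio_recorder_streamlit and openai packages plus an OPENAI_API_KEY, and the model names are illustrative.

# voice_chat_sketch.py - minimal Streamlit voice assistant (sketch, not production code)
import io
import json
import os

import streamlit as st
import streamlit.components.v1 as components
from audio_recorder_streamlit import audio_recorder  # assumed dependency
from openai import OpenAI

client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

st.title("🎤 Voice Assistant Sketch")

# 1. Record a short clip in the browser (returns WAV bytes, or None until recorded).
audio_bytes = audio_recorder()

if audio_bytes:
    # 2. Transcribe with Whisper; the SDK wants a named file-like object.
    wav = io.BytesIO(audio_bytes)
    wav.name = "voice.wav"
    transcript = client.audio.transcriptions.create(model="whisper-1", file=wav).text
    st.markdown(f"**You said:** {transcript}")

    # 3. Ask the chat model for a short answer.
    reply = client.chat.completions.create(
        model="gpt-4o",  # illustrative model name
        messages=[
            {"role": "system", "content": "Answer briefly and helpfully."},
            {"role": "user", "content": transcript},
        ],
    ).choices[0].message.content
    st.markdown(f"**Assistant:** {reply}")

    # 4. Speak the reply in the browser with HTML5 speech synthesis.
    components.html(
        f"<script>speechSynthesis.speak(new SpeechSynthesisUtterance({json.dumps(reply)}));</script>",
        height=0,
    )

For reference, the original desktop example (OpenCV webcam loop, background speech recognition, and PyAudio TTS) that this discussion analyzes is: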

import base64
from threading import Lock, Thread

import cv2
import openai
from cv2 import VideoCapture, imencode
from dotenv import load_dotenv
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain.schema.messages import SystemMessage
from langchain_community.chat_message_histories import ChatMessageHistory
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_google_genai import ChatGoogleGenerativeAI  # needed for the Gemini model used below
from langchain_openai import ChatOpenAI
from pyaudio import PyAudio, paInt16
from speech_recognition import Microphone, Recognizer, UnknownValueError

load_dotenv()

class WebcamStream:
    def __init__(self):
        self.stream = VideoCapture(index=0)
        _, self.frame = self.stream.read()
        self.running = False
        self.lock = Lock()

    def start(self):
        if self.running:
            return self

        self.running = True

        self.thread = Thread(target=self.update, args=())
        self.thread.start()
        return self

    def update(self):
        while self.running:
            _, frame = self.stream.read()

            self.lock.acquire()
            self.frame = frame
            self.lock.release()

    def read(self, encode=False):
        self.lock.acquire()
        frame = self.frame.copy()
        self.lock.release()

        if encode:
            _, buffer = imencode(".jpeg", frame)
            return base64.b64encode(buffer)

        return frame

    def stop(self):
        self.running = False
        if self.thread.is_alive():
            self.thread.join()

    def __exit__(self, exc_type, exc_value, exc_traceback):
        self.stream.release()
class Assistant:
    def __init__(self, model):
        self.chain = self._create_inference_chain(model)

    def answer(self, prompt, image):
        if not prompt:
            return

        print("Prompt:", prompt)

        response = self.chain.invoke(
            {"prompt": prompt, "image_base64": image.decode()},
            config={"configurable": {"session_id": "unused"}},
        ).strip()

        print("Response:", response)

        if response:
            self._tts(response)

    def _tts(self, response):
        player = PyAudio().open(format=paInt16, channels=1, rate=24000, output=True)

        with openai.audio.speech.with_streaming_response.create(
            model="tts-1",
            voice="alloy",
            response_format="pcm",
            input=response,
        ) as stream:
            for chunk in stream.iter_bytes(chunk_size=1024):
                player.write(chunk)

    def _create_inference_chain(self, model):
        SYSTEM_PROMPT = """
        You are a witty assistant that will use the chat history and the image
        provided by the user to answer its questions. Your job is to answer
        questions.

        Use few words on your answers. Go straight to the point. Do not use any
        emoticons or emojis.

        Be friendly and helpful. Show some personality.
        """

        prompt_template = ChatPromptTemplate.from_messages(
            [
                SystemMessage(content=SYSTEM_PROMPT),
                MessagesPlaceholder(variable_name="chat_history"),
                (
                    "human",
                    [
                        {"type": "text", "text": "{prompt}"},
                        {
                            "type": "image_url",
                            "image_url": "data:image/jpeg;base64,{image_base64}",
                        },
                    ],
                ),
            ]
        )

        chain = prompt_template | model | StrOutputParser()

        chat_message_history = ChatMessageHistory()
        return RunnableWithMessageHistory(
            chain,
            lambda _: chat_message_history,
            input_messages_key="prompt",
            history_messages_key="chat_history",
        )

webcam_stream = WebcamStream().start()

model = ChatGoogleGenerativeAI(model="gemini-1.5-flash-latest")

# You can use OpenAI's GPT-4o model instead of Gemini Flash
# by uncommenting the following line:
# model = ChatOpenAI(model="gpt-4o")

assistant = Assistant(model)

def audio_callback(recognizer, audio):
    try:
        prompt = recognizer.recognize_whisper(audio, model="base", language="english")
        assistant.answer(prompt, webcam_stream.read(encode=True))

    except UnknownValueError:
        print("There was an error processing the audio.")

recognizer = Recognizer()
microphone = Microphone()
with microphone as source:
    recognizer.adjust_for_ambient_noise(source)

stop_listening = recognizer.listen_in_background(microphone, audio_callback)

while True:
    cv2.imshow("webcam", webcam_stream.read())
    if cv2.waitKey(1) in [27, ord("q")]:
        break

webcam_stream.stop()
cv2.destroyAllWindows()
stop_listening(wait_for_stop=False)

For an example of how to combine GPT-4o, Claude, and ArXiv AI lookup in Streamlit, use this program:

import streamlit as st
import anthropic
import openai
import base64
from datetime import datetime
import plotly.graph_objects as go
import cv2
import glob
import json
import math
import os
import pytz
import random
import re
import requests
import streamlit.components.v1 as components
import textract
import time
import zipfile
from audio_recorder_streamlit import audio_recorder
from bs4 import BeautifulSoup
from collections import deque
from dotenv import load_dotenv
from gradio_client import Client
from huggingface_hub import InferenceClient
from io import BytesIO
from PIL import Image
from PyPDF2 import PdfReader
from urllib.parse import quote
from xml.etree import ElementTree as ET
from openai import OpenAI
import extra_streamlit_components as stx
from streamlit.runtime.scriptrunner import get_script_run_ctx

# 1. 🚲BikeAI🏆 Configuration and Setup

Site_Name = '🚲BikeAI🏆 Claude and GPT Multi-Agent Research AI'
title = "🚲BikeAI🏆 Claude and GPT Multi-Agent Research AI"
helpURL = 'https://huggingface.co/awacke1'
bugURL = 'https://huggingface.co/spaces/awacke1'
icons = '🚲🏆'

st.set_page_config(
    page_title=title,
    page_icon=icons,
    layout="wide",
    initial_sidebar_state="auto",
    menu_items={
        'Get Help': helpURL,
        'Report a bug': bugURL,
        'About': title
    }
)

load_dotenv()

openai.api_key = os.getenv('OPENAI_API_KEY')
if openai.api_key is None:
    openai.api_key = st.secrets['OPENAI_API_KEY']
openai_client = OpenAI(
    api_key=os.getenv('OPENAI_API_KEY'),
    organization=os.getenv('OPENAI_ORG_ID')
)

anthropic_key = os.getenv("ANTHROPIC_API_KEY_3")
if anthropic_key is None:
    anthropic_key = st.secrets["ANTHROPIC_API_KEY"]
claude_client = anthropic.Anthropic(api_key=anthropic_key)

API_URL = os.getenv('API_URL')
HF_KEY = os.getenv('HF_KEY')
MODEL1 = "meta-llama/Llama-2-7b-chat-hf"
MODEL2 = "openai/whisper-small.en"
headers = {
    "Authorization": f"Bearer {HF_KEY}",
    "Content-Type": "application/json"
}

# 2. 🚲BikeAI🏆 Initialize session states

if 'transcript_history' not in st.session_state:
    st.session_state.transcript_history = []
if "chat_history" not in st.session_state:
    st.session_state.chat_history = []
if "openai_model" not in st.session_state:
    st.session_state["openai_model"] = "gpt-4o-2024-05-13"
if "messages" not in st.session_state:
    st.session_state.messages = []
if 'last_voice_input' not in st.session_state:
    st.session_state.last_voice_input = ""

# 3. 🚲BikeAI🏆 Custom CSS

st.markdown("""
<style>
    /* The custom CSS block was stripped from the original post; add styles here. */
</style>
""", unsafe_allow_html=True)

# Create and save a file (and avoid the black hole of lost data 🕳)

def generate_filename(prompt, file_type):
    """Generate a safe filename using the prompt and file type."""
    central = pytz.timezone('US/Central')
    safe_date_time = datetime.now(central).strftime("%m%d_%H%M")
    replaced_prompt = re.sub(r'[<>:"/\\|?*\n]', ' ', prompt)
    safe_prompt = re.sub(r'\s+', ' ', replaced_prompt).strip()[:230]
    return f"{safe_date_time}_{safe_prompt}.{file_type}"

def create_file(filename, prompt, response, should_save=True):
    if not should_save:
        return
    with open(filename, 'w', encoding='utf-8') as file:
        file.write(prompt + "\n\n" + response)

def create_and_save_file(content, file_type="md", prompt=None, is_image=False, should_save=True):
    """Create and save file with proper handling of different types."""
    if not should_save:
        return None
    filename = generate_filename(prompt if prompt else content, file_type)
    with open(filename, "w", encoding="utf-8") as f:
        if is_image:
            f.write(content)
        else:
            f.write(prompt + "\n\n" + content if prompt else content)
    return filename

# Load a file, base64 it, return as link

def get_download_link(file_path):
    """Create download link for file."""
    with open(file_path, "rb") as file:
        contents = file.read()
    b64 = base64.b64encode(contents).decode()
    # The anchor tag was stripped from the original post; a standard data-URI download link:
    return f'<a href="data:file/txt;base64,{b64}" download="{os.path.basename(file_path)}">Download {os.path.basename(file_path)} 📂</a>'

# Speech Synth Browser Style

@st.cache_resource
def SpeechSynthesis(result):
    """HTML5 Speech Synthesis."""
    # The HTML body was stripped from the original post; this is a minimal reconstruction
    # of a "Read It Aloud" page that speaks the result with the browser's speechSynthesis API.
    documentHTML5 = f'''
    <!DOCTYPE html>
    <html>
    <head>
        <title>Read It Aloud</title>
        <script type="text/javascript">
            function readAloud() {{
                const text = document.getElementById("textArea").value;
                const speech = new SpeechSynthesisUtterance(text);
                window.speechSynthesis.speak(speech);
            }}
        </script>
    </head>
    <body>
        <h1>🔊 Read It Aloud</h1>
        <textarea id="textArea" rows="10" cols="80">{result}</textarea>
        <br>
        <button onclick="readAloud()">🔊 Read Aloud</button>
    </body>
    </html>
    '''
    components.html(documentHTML5, width=1280, height=300)

# Media Processing Functions

def process_image(image_input, user_prompt):
    """Process image with GPT-4o vision."""
    if isinstance(image_input, str):
        with open(image_input, "rb") as image_file:
            image_input = image_file.read()
    base64_image = base64.b64encode(image_input).decode("utf-8")
    response = openai_client.chat.completions.create(
        model=st.session_state["openai_model"],
        messages=[
            {"role": "system", "content": "You are a helpful assistant that responds in Markdown."},
            {"role": "user", "content": [
                {"type": "text", "text": user_prompt},
                {"type": "image_url", "image_url": {
                    "url": f"data:image/png;base64,{base64_image}"
                }}
            ]}
        ],
        temperature=0.0,
    )
    return response.choices[0].message.content

def process_audio(audio_input, text_input=''):
    """Process audio with Whisper and GPT."""
    if isinstance(audio_input, str):
        with open(audio_input, "rb") as file:
            audio_input = file.read()
    transcription = openai_client.audio.transcriptions.create(
        model="whisper-1",
        file=audio_input,
    )
    st.session_state.messages.append({"role": "user", "content": transcription.text})
    with st.chat_message("assistant"):
        st.markdown(transcription.text)
    SpeechSynthesis(transcription.text)
    filename = generate_filename(transcription.text, "wav")
    create_and_save_file(audio_input, "wav", transcription.text, True)

# Modified video processing function without moviepy dependency

def process_video(video_path, seconds_per_frame=1):
    """Process video files for frame extraction."""
    base64Frames = []
    video = cv2.VideoCapture(video_path)
    total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = video.get(cv2.CAP_PROP_FPS)
    frames_to_skip = int(fps * seconds_per_frame)

    for frame_idx in range(0, total_frames, frames_to_skip):
        video.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)
        success, frame = video.read()
        if not success:
            break
        _, buffer = cv2.imencode(".jpg", frame)
        base64Frames.append(base64.b64encode(buffer).decode("utf-8"))
    video.release()
    return base64Frames, None

def process_video_with_gpt(video_input, user_prompt):
    """Process video with GPT-4 vision."""
    base64Frames, _ = process_video(video_input)
    response = openai_client.chat.completions.create(
        model=st.session_state["openai_model"],
        messages=[
            {"role": "system", "content": "Analyze the video frames and provide a detailed description."},
            {"role": "user", "content": [
                {"type": "text", "text": user_prompt},
                *[{"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{frame}"}}
                  for frame in base64Frames]
            ]}
        ]
    )
    return response.choices[0].message.content

def extract_urls(text):
    try:
        date_pattern = re.compile(r'### (\d{2} \w{3} \d{4})')
        abs_link_pattern = re.compile(r'\[(.*?)\]\((https://arxiv\.org/abs/\d+\.\d+)\)')
        pdf_link_pattern = re.compile(r'\[⬇️\]\((https://arxiv\.org/pdf/\d+\.\d+)\)')
        title_pattern = re.compile(r'### \d{2} \w{3} \d{4} \| \[(.*?)\]')
        date_matches = date_pattern.findall(text)
        abs_link_matches = abs_link_pattern.findall(text)
        pdf_link_matches = pdf_link_pattern.findall(text)
        title_matches = title_pattern.findall(text)
        # markdown with the extracted fields
        markdown_text = ""
        for i in range(len(date_matches)):
            date = date_matches[i]
            title = title_matches[i]
            abs_link = abs_link_matches[i][1]
            pdf_link = pdf_link_matches[i]
            markdown_text += f"Date: {date}\n\n"
            markdown_text += f"Title: {title}\n\n"
            markdown_text += f"Abstract Link: {abs_link}\n\n"
            markdown_text += f"PDF Link: {pdf_link}\n\n"
            markdown_text += "---\n\n"
        return markdown_text
    except:
        st.write('.')
        return ''

def search_arxiv(query):
    st.write("Performing AI Lookup...")
    client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
    result1 = client.predict(
        prompt=query,
        llm_model_picked="mistralai/Mixtral-8x7B-Instruct-v0.1",
        stream_outputs=True,
        api_name="/ask_llm"
    )
    st.markdown("### Mixtral-8x7B-Instruct-v0.1 Result")
    st.markdown(result1)
    result2 = client.predict(
        prompt=query,
        llm_model_picked="mistralai/Mistral-7B-Instruct-v0.2",
        stream_outputs=True,
        api_name="/ask_llm"
    )
    st.markdown("### Mistral-7B-Instruct-v0.2 Result")
    st.markdown(result2)
    combined_result = f"{result1}\n\n{result2}"
    return combined_result
    # return responseall

# Function to generate a filename based on prompt and time (because names matter 🕒)

def generate_filename(prompt, file_type):
    central = pytz.timezone('US/Central')
    safe_date_time = datetime.now(central).strftime("%m%d_%H%M")
    safe_prompt = re.sub(r'\W+', '', prompt)[:90]
    return f"{safe_date_time}_{safe_prompt}.{file_type}"

# Function to create and save a file (and avoid the black hole of lost data 🕳)

def create_file(filename, prompt, response):
    with open(filename, 'w', encoding='utf-8') as file:
        file.write(prompt + "\n\n" + response)

def perform_ai_lookup(query):
    start_time = time.strftime("%Y-%m-%d %H:%M:%S")
    client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
    response1 = client.predict(
        query,
        20,
        "Semantic Search",
        "mistralai/Mixtral-8x7B-Instruct-v0.1",
        api_name="/update_with_rag_md"
    )
    Question = '### 🔎 ' + query + '\r\n'  # Format for markdown display with links
    References = response1[0]
    ReferenceLinks = extract_urls(References)
    RunSecondQuery = True
    results = ''
    if RunSecondQuery:
        # Search 2 - Retrieve the Summary with Papers Context and Original Query
        response2 = client.predict(
            query,
            "mistralai/Mixtral-8x7B-Instruct-v0.1",
            True,
            api_name="/ask_llm"
        )
        if len(response2) > 10:
            Answer = response2
            SpeechSynthesis(Answer)
            # Restructure results to follow format of Question, Answer, References, ReferenceLinks
            results = Question + '\r\n' + Answer + '\r\n' + References + '\r\n' + ReferenceLinks
            st.markdown(results)
    st.write('🔍 Run of Multi-Agent System Paper Summary Spec is Complete')
    end_time = time.strftime("%Y-%m-%d %H:%M:%S")
    start_timestamp = time.mktime(time.strptime(start_time, "%Y-%m-%d %H:%M:%S"))
    end_timestamp = time.mktime(time.strptime(end_time, "%Y-%m-%d %H:%M:%S"))
    elapsed_seconds = end_timestamp - start_timestamp
    st.write(f"Start time: {start_time}")
    st.write(f"Finish time: {end_time}")
    st.write(f"Elapsed time: {elapsed_seconds:.2f} seconds")
    filename = generate_filename(query, "md")
    create_file(filename, query, results)
    return results

# Chat Processing Functions

def process_with_gpt(text_input):
    """Process text with GPT-4o."""
    if text_input:
        st.session_state.messages.append({"role": "user", "content": text_input})
        with st.chat_message("user"):
            st.markdown(text_input)
        with st.chat_message("assistant"):
            completion = openai_client.chat.completions.create(
                model=st.session_state["openai_model"],
                messages=[
                    {"role": m["role"], "content": m["content"]}
                    for m in st.session_state.messages
                ],
                stream=False
            )
            return_text = completion.choices[0].message.content
            st.write("GPT-4o: " + return_text)
            # filename = generate_filename(text_input, "md")
            filename = generate_filename("GPT-4o: " + return_text, "md")
            create_file(filename, text_input, return_text)
            st.session_state.messages.append({"role": "assistant", "content": return_text})
        return return_text

def process_with_claude(text_input):
    """Process text with Claude."""
    if text_input:
        with st.chat_message("user"):
            st.markdown(text_input)
        with st.chat_message("assistant"):
            response = claude_client.messages.create(
                model="claude-3-sonnet-20240229",
                max_tokens=1000,
                messages=[
                    {"role": "user", "content": text_input}
                ]
            )
            response_text = response.content[0].text
            st.write("Claude: " + response_text)
            # filename = generate_filename(text_input, "md")
            filename = generate_filename("Claude: " + response_text, "md")
            create_file(filename, text_input, response_text)
            st.session_state.chat_history.append({
                "user": text_input,
                "claude": response_text
            })
        return response_text

# File Management Functions

def load_file(file_name):
    """Load file content."""
    with open(file_name, "r", encoding='utf-8') as file:
        content = file.read()
    return content

def create_zip_of_files(files):
    """Create zip archive of files."""
    zip_name = "all_files.zip"
    with zipfile.ZipFile(zip_name, 'w') as zipf:
        for file in files:
            zipf.write(file)
    return zip_name

def get_media_html(media_path, media_type="video", width="100%"):
    """Generate HTML for media player."""
    media_data = base64.b64encode(open(media_path, 'rb').read()).decode()
    # The <video>/<audio> tags were stripped from the original post; standard data-URI players:
    if media_type == "video":
        return f'''
        <video width="{width}" controls autoplay muted loop>
            <source src="data:video/mp4;base64,{media_data}" type="video/mp4">
            Your browser does not support the video tag.
        </video>
        '''
    else:  # audio
        return f'''
        <audio controls style="width: {width};">
            <source src="data:audio/mpeg;base64,{media_data}" type="audio/mpeg">
            Your browser does not support the audio element.
        </audio>
        '''

def create_media_gallery():
    """Create the media gallery interface."""
    st.header("🎬 Media Gallery")
    tabs = st.tabs(["🖼️ Images", "🎵 Audio", "🎥 Video"])
    with tabs[0]:
        image_files = glob.glob("*.png") + glob.glob("*.jpg")
        if image_files:
            num_cols = st.slider("Number of columns", 1, 5, 3)
            cols = st.columns(num_cols)
            for idx, image_file in enumerate(image_files):
                with cols[idx % num_cols]:
                    img = Image.open(image_file)
                    st.image(img, use_container_width=True)
                    # Add GPT vision analysis option
                    if st.button(f"Analyze {os.path.basename(image_file)}"):
                        analysis = process_image(image_file,
                                                 "Describe this image in detail and identify key elements.")
                        st.markdown(analysis)
    with tabs[1]:
        audio_files = glob.glob("*.mp3") + glob.glob("*.wav")
        for audio_file in audio_files:
            with st.expander(f"🎵 {os.path.basename(audio_file)}"):
                st.markdown(get_media_html(audio_file, "audio"), unsafe_allow_html=True)
                if st.button(f"Transcribe {os.path.basename(audio_file)}"):
                    with open(audio_file, "rb") as f:
                        transcription = process_audio(f)
                        st.write(transcription)
    with tabs[2]:
        video_files = glob.glob("*.mp4")
        for video_file in video_files:
            with st.expander(f"🎥 {os.path.basename(video_file)}"):
                st.markdown(get_media_html(video_file, "video"), unsafe_allow_html=True)
                if st.button(f"Analyze {os.path.basename(video_file)}"):
                    analysis = process_video_with_gpt(video_file,
                                                      "Describe what's happening in this video.")
                    st.markdown(analysis)

def display_file_manager():
    """Display file management sidebar with guaranteed unique button keys."""
    st.sidebar.title("📁 File Management")
    all_files = glob.glob("*.md")
    all_files.sort(reverse=True)
    if st.sidebar.button("🗑 Delete All", key="delete_all_files_button"):
        for file in all_files:
            os.remove(file)
        st.rerun()
    if st.sidebar.button("⬇️ Download All", key="download_all_files_button"):
        zip_file = create_zip_of_files(all_files)
        st.sidebar.markdown(get_download_link(zip_file), unsafe_allow_html=True)
    # Create unique keys using file attributes
    for idx, file in enumerate(all_files):
        # Get file stats for unique identification
        file_stat = os.stat(file)
        unique_id = f"{idx}_{file_stat.st_size}_{file_stat.st_mtime}"
        col1, col2, col3, col4 = st.sidebar.columns([1, 3, 1, 1])
        with col1:
            if st.button("🌐", key=f"view_{unique_id}"):
                st.session_state.current_file = file
                st.session_state.file_content = load_file(file)
        with col2:
            st.markdown(get_download_link(file), unsafe_allow_html=True)
        with col3:
            if st.button("📂", key=f"edit_{unique_id}"):
                st.session_state.current_file = file
                st.session_state.file_content = load_file(file)
        with col4:
            if st.button("🗑", key=f"delete_{unique_id}"):
                os.remove(file)
                st.rerun()

# Speech Recognition HTML Component

speech_recognition_html = """
<!DOCTYPE html>
<html>
<head>
    <title>Continuous Speech Demo</title>
</head>
<body>
    <!-- Markup reconstructed: the script below expects start/stop/clear buttons,
         a status line, and an output area. -->
    <h2>Continuous Speech Demo</h2>
    <button id="start">Start</button>
    <button id="stop" disabled>Stop</button>
    <button id="clear">Clear</button>
    <div id="status">Ready</div>
    <div id="output"></div>

    <!-- Add the hidden input here -->
    <input type="hidden" id="streamlit-data" value="">

<script>
    if (!('webkitSpeechRecognition' in window)) {
        alert('Speech recognition not supported');
    } else {
        const recognition = new webkitSpeechRecognition();
        const startButton = document.getElementById('start');
        const stopButton = document.getElementById('stop');
        const clearButton = document.getElementById('clear');
        const status = document.getElementById('status');
        const output = document.getElementById('output');
        let fullTranscript = '';
        let lastUpdateTime = Date.now();

        // Configure recognition
        recognition.continuous = true;
        recognition.interimResults = true;

        // Function to start recognition
        const startRecognition = () => {
            try {
                recognition.start();
                status.textContent = 'Listening...';
                startButton.disabled = true;
                stopButton.disabled = false;
            } catch (e) {
                console.error(e);
                status.textContent = 'Error: ' + e.message;
            }
        };

        // Auto-start on load
        window.addEventListener('load', () => {
            setTimeout(startRecognition, 1000);
        });

        startButton.onclick = startRecognition;

        stopButton.onclick = () => {
            recognition.stop();
            status.textContent = 'Stopped';
            startButton.disabled = false;
            stopButton.disabled = true;
        };

        clearButton.onclick = () => {
            fullTranscript = '';
            output.textContent = '';
            window.parent.postMessage({
                type: 'clear_transcript',
            }, '*');
        };

        recognition.onresult = (event) => {
            let interimTranscript = '';
            let finalTranscript = '';

            for (let i = event.resultIndex; i < event.results.length; i++) {
                const transcript = event.results[i][0].transcript;
                if (event.results[i].isFinal) {
                    finalTranscript += transcript + '\\n';
                } else {
                    interimTranscript += transcript;
                }
            }

            if (finalTranscript || (Date.now() - lastUpdateTime > 5000)) {
                if (finalTranscript) {
                    fullTranscript += finalTranscript;
                    
                    // Update the hidden input value
                    document.getElementById('streamlit-data').value = fullTranscript;
                }
                lastUpdateTime = Date.now();
            }            

            output.textContent = fullTranscript + (interimTranscript ? '... ' + interimTranscript : '');
            output.scrollTop = output.scrollHeight;

            document.getElementById('streamlit-data').value = fullTranscript;

        };

        recognition.onend = () => {
            if (!stopButton.disabled) {
                try {
                    recognition.start();
                    console.log('Restarted recognition');
                } catch (e) {
                    console.error('Failed to restart recognition:', e);
                    status.textContent = 'Error restarting: ' + e.message;
                    startButton.disabled = false;
                    stopButton.disabled = true;
                }
            }
        };

        recognition.onerror = (event) => {
            console.error('Recognition error:', event.error);
            status.textContent = 'Error: ' + event.error;
            
            if (event.error === 'not-allowed' || event.error === 'service-not-allowed') {
                startButton.disabled = false;
                stopButton.disabled = true;
            }
        };
    }
</script>
"""

# Helper Functions

def generate_filename(prompt, file_type):
    central = pytz.timezone('US/Central')
    safe_date_time = datetime.now(central).strftime("%m%d_%H%M")
    replaced_prompt = re.sub(r'[<>:"/\\|?*\n]', ' ', prompt)
    safe_prompt = re.sub(r'\s+', ' ', replaced_prompt).strip()[:230]
    return f"{safe_date_time}_{safe_prompt}.{file_type}"

# File Management Functions

def load_file(file_name):
    """Load file content."""
    with open(file_name, "r", encoding='utf-8') as file:
        content = file.read()
    return content

def create_zip_of_files(files):
    """Create zip archive of files."""
    zip_name = "all_files.zip"
    with zipfile.ZipFile(zip_name, 'w') as zipf:
        for file in files:
            zipf.write(file)
    return zip_name

def get_download_link(file):
    """Create download link for file."""
    with open(file, "rb") as f:
        contents = f.read()
    b64 = base64.b64encode(contents).decode()
    # The anchor tag was stripped from the original post; a standard data-URI download link:
    return f'<a href="data:file/txt;base64,{b64}" download="{os.path.basename(file)}">Download {os.path.basename(file)} 📂</a>'

def display_file_manager():
    """Display file management sidebar."""
    st.sidebar.title("📁 File Management")

    all_files = glob.glob("*.md")
    all_files.sort(reverse=True)

    if st.sidebar.button("🗑 Delete All"):
        for file in all_files:
            os.remove(file)
        st.rerun()

    if st.sidebar.button("⬇️ Download All"):
        zip_file = create_zip_of_files(all_files)
        st.sidebar.markdown(get_download_link(zip_file), unsafe_allow_html=True)

    for file in all_files:
        col1, col2, col3, col4 = st.sidebar.columns([1, 3, 1, 1])
        with col1:
            if st.button("🌐", key="view_" + file):
                st.session_state.current_file = file
                st.session_state.file_content = load_file(file)
        with col2:
            st.markdown(get_download_link(file), unsafe_allow_html=True)
        with col3:
            if st.button("📂", key="edit_" + file):
                st.session_state.current_file = file
                st.session_state.file_content = load_file(file)
        with col4:
            if st.button("🗑", key="delete_" + file):
                os.remove(file)
                st.rerun()

def create_media_gallery():
    """Create the media gallery interface."""
    st.header("🎬 Media Gallery")

    tabs = st.tabs(["🖼️ Images", "🎵 Audio", "🎥 Video"])

    with tabs[0]:
        image_files = glob.glob("*.png") + glob.glob("*.jpg")
        if image_files:
            num_cols = st.slider("Number of columns", 1, 5, 3)
            cols = st.columns(num_cols)
            for idx, image_file in enumerate(image_files):
                with cols[idx % num_cols]:
                    img = Image.open(image_file)
                    st.image(img, use_container_width=True)

                    # Add GPT vision analysis option
                    if st.button(f"Analyze {os.path.basename(image_file)}"):
                        analysis = process_image(image_file,
                                                 "Describe this image in detail and identify key elements.")
                        st.markdown(analysis)

    with tabs[1]:
        audio_files = glob.glob("*.mp3") + glob.glob("*.wav")
        for audio_file in audio_files:
            with st.expander(f"🎵 {os.path.basename(audio_file)}"):
                st.markdown(get_media_html(audio_file, "audio"), unsafe_allow_html=True)
                if st.button(f"Transcribe {os.path.basename(audio_file)}"):
                    with open(audio_file, "rb") as f:
                        transcription = process_audio(f)
                        st.write(transcription)

    with tabs[2]:
        video_files = glob.glob("*.mp4")
        for video_file in video_files:
            with st.expander(f"🎥 {os.path.basename(video_file)}"):
                st.markdown(get_media_html(video_file, "video"), unsafe_allow_html=True)
                if st.button(f"Analyze {os.path.basename(video_file)}"):
                    analysis = process_video_with_gpt(video_file,
                                                      "Describe what's happening in this video.")
                    st.markdown(analysis)

def get_media_html(media_path, media_type="video", width="100%"):
    """Generate HTML for media player."""
    media_data = base64.b64encode(open(media_path, 'rb').read()).decode()
    # The <video>/<audio> tags were stripped from the original post; standard data-URI players:
    if media_type == "video":
        return f'''
        <video width="{width}" controls autoplay muted loop>
            <source src="data:video/mp4;base64,{media_data}" type="video/mp4">
            Your browser does not support the video tag.
        </video>
        '''
    else:  # audio
        return f'''
        <audio controls style="width: {width};">
            <source src="data:audio/mpeg;base64,{media_data}" type="audio/mpeg">
            Your browser does not support the audio element.
        </audio>
        '''

@st.cache_resource
def set_transcript(text):
    """Set transcript in session state."""
    st.session_state.voice_transcript = text

def main():
    st.sidebar.markdown("### 🚲BikeAI🏆 Claude and GPT Multi-Agent Research AI")

    # Main navigation
    tab_main = st.radio("Choose Action:",
                        ["🎤 Voice Input", "📸 Media Gallery", "🔍 Search ArXiv", "📝 File Editor"],
                        horizontal=True)

    if tab_main == "🎤 Voice Input":
        st.subheader("Voice Recognition")

        # Initialize session state for the transcript
        if 'voice_transcript' not in st.session_state:
            st.session_state.voice_transcript = ""

        # Display speech recognition component and capture returned value
        transcript = st.components.v1.html(speech_recognition_html, height=400)

        # Update session state if there's new data
        if transcript is not None and transcript != "":
            st.session_state.voice_transcript = transcript

        # Display the transcript in a Streamlit text area
        # st.markdown("### Processed Voice Input:")
        # st.text_area("Voice Transcript", st.session_state.voice_transcript, height=100)

        # Model Selection
        model_choice = st.sidebar.radio(
            "Choose AI Model:",
            ["GPT+Claude+Arxiv", "GPT-4o", "Claude-3"]
        )

        # Chat Interface
        user_input = st.text_area("Message:", height=100)

        if st.button("Send 📨"):
            if user_input:
                if model_choice == "GPT-4o":
                    gpt_response = process_with_gpt(user_input)
                elif model_choice == "Claude-3":
                    claude_response = process_with_claude(user_input)
                else:  # Both
                    col1, col2, col3 = st.columns(3)
                    with col2:
                        st.subheader("Claude-3.5 Sonnet:")
                        try:
                            claude_response = process_with_claude(user_input)
                        except:
                            st.write('Claude 3.5 Sonnet out of tokens.')
                    with col1:
                        st.subheader("GPT-4o Omni:")
                        try:
                            gpt_response = process_with_gpt(user_input)
                        except:
                            st.write('GPT 4o out of tokens')
                    with col3:
                        st.subheader("Arxiv and Mistral Research:")
                        with st.spinner("Searching ArXiv..."):
                            # results = search_arxiv(user_input)
                            results = perform_ai_lookup(user_input)
                            st.markdown(results)

        # Display Chat History
        st.subheader("Chat History 📜")
        tab1, tab2 = st.tabs(["Claude History", "GPT-4o History"])

        with tab1:
            for chat in st.session_state.chat_history:
                st.text_area("You:", chat["user"], height=100)
                st.text_area("Claude:", chat["claude"], height=200)
                st.markdown(chat["claude"])

        with tab2:
            for message in st.session_state.messages:
                with st.chat_message(message["role"]):
                    st.markdown(message["content"])

    elif tab_main == "📸 Media Gallery":
        create_media_gallery()

    elif tab_main == "🔍 Search ArXiv":
        query = st.text_input("Enter your research query:")
        if query:
            with st.spinner("Searching ArXiv..."):
                results = search_arxiv(query)
                st.markdown(results)

    elif tab_main == "📝 File Editor":
        if hasattr(st.session_state, 'current_file'):
            st.subheader(f"Editing: {st.session_state.current_file}")
            new_content = st.text_area("Content:", st.session_state.file_content, height=300)
            if st.button("Save Changes"):
                with open(st.session_state.current_file, 'w', encoding='utf-8') as file:
                    file.write(new_content)
                st.success("File updated successfully!")

    # Always show file manager in sidebar
    display_file_manager()

if __name__ == "__main__":
    main()
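A note on wiring the voice input: the program imports audio_recorder from audio_recorder_streamlit but never calls it, and the static components.html speech-recognition widget cannot push its hidden-input value back into Python on its own. A hedged sketch of one possible server-side hookup inside the "🎤 Voice Input" branch, reusing the program's own process_audio() helper (the widget placement and keys are illustrative, and process_audio() may need a named file-like wrapper around the bytes before the Whisper upload succeeds):

# Possible addition inside the "🎤 Voice Input" branch of main() (sketch only).
audio_bytes = audio_recorder()  # browser microphone widget; returns WAV bytes or None
if audio_bytes and audio_bytes != st.session_state.last_voice_input:
    st.session_state.last_voice_input = audio_bytes
    st.audio(audio_bytes, format="audio/wav")   # let the user replay what was captured
    process_audio(audio_bytes)                  # transcribe with Whisper, then read it aloud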
