# app.0523.plainvanilla.py
# GPT-4o Omni demo: text, audio, image, and video chat in a single Streamlit app.
import streamlit as st
from openai import OpenAI
import os, base64, cv2, glob
from moviepy.editor import VideoFileClip
from datetime import datetime
import pytz
from audio_recorder_streamlit import audio_recorder
from PIL import Image
client = OpenAI(api_key=os.getenv('OPENAI_API_KEY'), organization=os.getenv('OPENAI_ORG_ID'))
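# Date-pinned GPT-4o snapshot used for every chat, vision, and summarization call below.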
MODEL = "gpt-4o-2024-05-13"
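# Keep the running chat transcript in session state so it survives Streamlit reruns.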
if 'messages' not in st.session_state:
st.session_state.messages = []
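# Build a filesystem-safe filename: US/Central timestamp plus a sanitized prompt slug (capped at 90 chars).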
def generate_filename(prompt, file_type):
central = pytz.timezone('US/Central')
safe_date_time = datetime.now(central).strftime("%m%d_%H%M")
safe_prompt = "".join(x for x in prompt.replace(" ", "_").replace("\n", "_") if x.isalnum() or x == "_")[:90]
return f"{safe_date_time}_{safe_prompt}.{file_type}"
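# Persist a response as a .md file; note the prompt argument is accepted but only the response is written.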
def create_file(filename, prompt, response, should_save=True):
if should_save and os.path.splitext(filename)[1] in ['.txt', '.htm', '.md']:
with open(os.path.splitext(filename)[0] + ".md", 'w', encoding='utf-8') as file:
file.write(response)
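# Text chat: render the user turn, send the full history to GPT-4o, then render and save the reply.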
def process_text(text_input):
if text_input:
st.session_state.messages.append({"role": "user", "content": text_input})
with st.chat_message("user"):
st.markdown(text_input)
completion = client.chat.completions.create(model=MODEL, messages=[{"role": m["role"], "content": m["content"]} for m in st.session_state.messages], stream=False)
return_text = completion.choices[0].message.content
with st.chat_message("assistant"):
st.markdown(return_text)
filename = generate_filename(text_input, "md")
create_file(filename, text_input, return_text)
st.session_state.messages.append({"role": "assistant", "content": return_text})
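# Plain-text variant of process_text: appends the user turn itself, writes the reply with st.write, and returns it.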
def process_text2(MODEL='gpt-4o-2024-05-13', text_input='What is 2+2 and what is an imaginary number'):
if text_input:
st.session_state.messages.append({"role": "user", "content": text_input})
completion = client.chat.completions.create(model=MODEL, messages=st.session_state.messages)
return_text = completion.choices[0].message.content
st.write("Assistant: " + return_text)
filename = generate_filename(text_input, "md")
create_file(filename, text_input, return_text, should_save=True)
return return_text
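# Write an uploaded image's raw bytes to disk and return the path.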
def save_image(image_input, filename):
with open(filename, "wb") as f:
f.write(image_input.getvalue())
return filename
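# Vision chat: send the upload as a base64 data URL and ask GPT-4o for a ten-fact markdown outline.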
def process_image(image_input):
if image_input:
with st.chat_message("user"):
st.markdown('Processing image: ' + image_input.name)
base64_image = base64.b64encode(image_input.read()).decode("utf-8")
        st.session_state.messages.append({"role": "user", "content": [{"type": "text", "text": "Help me understand what is in this picture and list ten facts as a markdown outline with appropriate emojis that describe what you see."}, {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{base64_image}"}}]})
response = client.chat.completions.create(model=MODEL, messages=st.session_state.messages, temperature=0.0)
image_response = response.choices[0].message.content
with st.chat_message("assistant"):
st.markdown(image_response)
        filename_md, filename_img = generate_filename(image_input.name + '- ' + image_response, "md"), image_input.name
        create_file(filename_md, image_input.name, image_response, should_save=True)
        save_image(image_input, filename_img)
st.session_state.messages.append({"role": "assistant", "content": image_response})
return image_response
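# Audio chat: transcribe the upload with whisper-1, then have GPT-4o summarize the transcript in Markdown.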
def process_audio(audio_input):
if audio_input:
        transcription = client.audio.transcriptions.create(model="whisper-1", file=audio_input)
        st.session_state.messages.append({"role": "user", "content": transcription.text})
response = client.chat.completions.create(model=MODEL, messages=[{"role": "system", "content":"You are generating a transcript summary. Create a summary of the provided transcription. Respond in Markdown."}, {"role": "user", "content": [{"type": "text", "text": f"The audio transcription is: {transcription.text}"}]}], temperature=0)
audio_response = response.choices[0].message.content
with st.chat_message("assistant"):
st.markdown(audio_response)
filename = generate_filename(transcription.text, "md")
create_file(filename, transcription.text, audio_response, should_save=True)
st.session_state.messages.append({"role": "assistant", "content": audio_response})
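# Video chat: sample frames, summarize the audio track, then ask GPT-4o about frames and transcript together.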
def process_audio_and_video(video_input):
if video_input is not None:
video_path = save_video(video_input)
base64Frames, audio_path = process_video(video_path, seconds_per_frame=1)
transcript = process_audio_for_video(video_input)
        st.session_state.messages.append({"role": "user", "content": [{"type": "text", "text": "These are the frames from the video."}, *map(lambda x: {"type": "image_url", "image_url": {"url": f'data:image/jpg;base64,{x}', "detail": "low"}}, base64Frames), {"type": "text", "text": f"The audio transcription is: {transcript}"}]})
response = client.chat.completions.create(model=MODEL, messages=st.session_state.messages, temperature=0)
video_response = response.choices[0].message.content
with st.chat_message("assistant"):
st.markdown(video_response)
filename = generate_filename(transcript, "md")
create_file(filename, transcript, video_response, should_save=True)
st.session_state.messages.append({"role": "assistant", "content": video_response})
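# Transcribe a video's audio track with whisper-1 and summarize it; the caller treats the returned summary as the "transcript".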
def process_audio_for_video(video_input):
if video_input:
        transcription = client.audio.transcriptions.create(model="whisper-1", file=video_input)
        st.session_state.messages.append({"role": "user", "content": transcription.text})
response = client.chat.completions.create(model=MODEL, messages=[{"role": "system", "content":"You are generating a transcript summary. Create a summary of the provided transcription. Respond in Markdown."}, {"role": "user", "content": [{"type": "text", "text": f"The audio transcription is: {transcription.text}"}]}], temperature=0)
video_response = response.choices[0].message.content
with st.chat_message("assistant"):
st.markdown(video_response)
filename = generate_filename(transcription.text, "md")
create_file(filename, transcription.text, video_response, should_save=True)
st.session_state.messages.append({"role": "assistant", "content": video_response})
return video_response
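# Save the uploaded video to disk so OpenCV and MoviePy can open it by path.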
def save_video(video_file):
with open(video_file.name, "wb") as f:
f.write(video_file.getbuffer())
return video_file.name
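# Sample one frame every seconds_per_frame seconds as base64 JPEGs and extract the audio track to MP3.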
def process_video(video_path, seconds_per_frame=2):
    base64Frames, base_video_path = [], os.path.splitext(video_path)[0]
    video = cv2.VideoCapture(video_path)
    total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = video.get(cv2.CAP_PROP_FPS)
    curr_frame, frames_to_skip = 0, max(int(fps * seconds_per_frame), 1)  # guard against a zero skip when fps < 1
while curr_frame < total_frames - 1:
video.set(cv2.CAP_PROP_POS_FRAMES, curr_frame)
success, frame = video.read()
if not success: break
_, buffer = cv2.imencode(".jpg", frame)
base64Frames.append(base64.b64encode(buffer).decode("utf-8"))
curr_frame += frames_to_skip
video.release()
    audio_path = f"{base_video_path}.mp3"
    clip = VideoFileClip(video_path)
    if clip.audio is not None:  # some clips have no audio track
        clip.audio.write_audiofile(audio_path, bitrate="32k")
        clip.audio.close()
    clip.close()
print(f"Extracted {len(base64Frames)} frames")
print(f"Extracted audio to {audio_path}")
return base64Frames, audio_path
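# Record microphone audio in the browser, save it as WAV, and play it back; returns the filename or None.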
def save_and_play_audio(audio_recorder):
audio_bytes = audio_recorder(key='audio_recorder')
if audio_bytes:
filename = generate_filename("Recording", "wav")
with open(filename, 'wb') as f:
f.write(audio_bytes)
st.audio(audio_bytes, format="audio/wav")
return filename
return None
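# Cached grid of local .mp4 files; column count is set by a slider in main().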
@st.cache_resource
def display_videos_and_links(num_columns):
video_files = [f for f in os.listdir('.') if f.endswith('.mp4')]
if not video_files:
st.write("No MP4 videos found in the current directory.")
return
video_files_sorted = sorted(video_files, key=lambda x: len(x.split('.')[0]))
cols = st.columns(num_columns) # Define num_columns columns outside the loop
col_index = 0 # Initialize column index
    for video_file in video_files_sorted:
        with cols[col_index % num_columns]:  # cycle through the columns
            k = video_file.split('.')[0]  # assumes the keyword is the file name without extension
            st.video(video_file, format='video/mp4', start_time=0)
            #display_glossary_entity(k)  # glossary helper is not defined in this file
        col_index += 1  # place the next video in the next column
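# Cached grid of local .png files, laid out the same way as the video grid.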
@st.cache_resource
def display_images_and_wikipedia_summaries(num_columns=4):
image_files = [f for f in os.listdir('.') if f.endswith('.png')]
if not image_files:
st.write("No PNG images found in the current directory.")
return
image_files_sorted = sorted(image_files, key=lambda x: len(x.split('.')[0]))
cols = st.columns(num_columns) # Use specified num_columns for layout
col_index = 0 # Initialize column index for cycling through columns
for image_file in image_files_sorted:
with cols[col_index % num_columns]: # Cycle through columns based on num_columns
image = Image.open(image_file)
st.image(image, caption=image_file, use_column_width=True)
k = image_file.split('.')[0] # Assumes keyword is the file name without extension
#display_glossary_entity(k)
col_index += 1 # Increment to move to the next column in the next iteration
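# --- Hypothetical stand-ins (assumptions, not the original implementations) ---
# main() below calls transcribe_canary() and search_arxiv(), which are not defined
# in this file. The minimal sketches here keep the app runnable: transcription
# falls back to OpenAI whisper-1 (the name suggests NVIDIA's Canary ASR model),
# and the arXiv lookup hits the public Atom feed at export.arxiv.org.
def transcribe_canary(filename):
    with open(filename, "rb") as f:  # assumed fallback: whisper-1 instead of Canary
        transcription = client.audio.transcriptions.create(model="whisper-1", file=f)
    return transcription.text
def search_arxiv(query):
    import urllib.parse, urllib.request  # stdlib only; no extra dependency
    url = ("http://export.arxiv.org/api/query?search_query=all:"
           + urllib.parse.quote(query) + "&max_results=3")
    with urllib.request.urlopen(url) as resp:
        return resp.read().decode("utf-8")  # raw Atom XML; the caller ignores it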
def main():
st.markdown("##### GPT-4o Omni Model: Text, Audio, Image, & Video")
option = st.selectbox("Select an option", ("Text", "Image", "Audio", "Video"))
if option == "Text":
text_input = st.chat_input("Enter your text:")
if text_input:
process_text(text_input)
elif option == "Image":
image_input = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])
process_image(image_input)
elif option == "Audio":
audio_input = st.file_uploader("Upload an audio file", type=["mp3", "wav"])
process_audio(audio_input)
elif option == "Video":
video_input = st.file_uploader("Upload a video file", type=["mp4"])
process_audio_and_video(video_input)
all_files = sorted(glob.glob("*.md"), key=lambda x: (os.path.splitext(x)[1], x), reverse=True)
all_files = [file for file in all_files if len(os.path.splitext(file)[0]) >= 10]
st.sidebar.title("File Gallery")
for file in all_files:
with st.sidebar.expander(file), open(file, "r", encoding="utf-8") as f:
st.code(f.read(), language="markdown")
    if prompt := st.chat_input("GPT-4o Multimodal ChatBot - What can I help you with?"):
        with st.chat_message("user"):
            st.markdown(prompt)
        with st.chat_message("assistant"):
            response = process_text2(text_input=prompt)  # process_text2 appends the user turn itself
        st.session_state.messages.append({"role": "assistant", "content": response})
    filename = save_and_play_audio(audio_recorder)
    if filename is not None:
        transcript = transcribe_canary(filename)
        result = search_arxiv(transcript)  # result is currently unused
        with st.chat_message("user"):
            st.markdown(transcript)
        with st.chat_message("assistant"):
            response = process_text2(text_input=transcript)  # use the transcript, not the text prompt
        st.session_state.messages.append({"role": "assistant", "content": response})
# Image and Video Galleries
num_columns_images=st.slider(key="num_columns_images", label="Choose Number of Image Columns", min_value=1, max_value=15, value=5)
display_images_and_wikipedia_summaries(num_columns_images) # Image Jump Grid
num_columns_video=st.slider(key="num_columns_video", label="Choose Number of Video Columns", min_value=1, max_value=15, value=5)
display_videos_and_links(num_columns_video) # Video Jump Grid
if __name__ == "__main__":
main()