import gradio as gr
import cv2

from extract_frames import ExtractFrames
from get_every_fram_path import getEveryFramPath
from main_emotion_classifier import process, process_single_image
from grapher import createGraph


def process_image(image):
    # Process the image using the existing single-image classifier
    processed_image = process_single_image(image)
    return processed_image


def process_video(video_path):
    # Extract frames from the video and classify each one
    output_dir = ExtractFrames(video_path)
    frame_paths = getEveryFramPath(output_dir)
    results, most_frequent_emotion = process(frame_paths)

    # Create the emotion graphs from the results
    createGraph('data/output/results.txt')

    # Return paths to the three generated graphs
    return [
        'data/output/emotion_bar_plot.png',
        'data/output/emotion_stem_plot.png',
        'data/output/emotionAVG.png'
    ]


def gradio_interface(file):
    if file is None:
        return None, None

    file_type = file.name.split('.')[-1].lower()

    if file_type in ['jpg', 'jpeg', 'png', 'bmp']:
        # Image input: OpenCV reads BGR, so convert to RGB for display
        image = cv2.imread(file.name)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        processed_image = process_image(image)
        return processed_image, None
    elif file_type in ['mp4', 'avi', 'mov', 'wmv']:
        # Video input
        graph_paths = process_video(file.name)
        return None, graph_paths
    else:
        # Unsupported file type
        return None, None


# Set up the Gradio interface
iface = gr.Interface(
    fn=gradio_interface,
    inputs=gr.File(label="Upload Image or Video"),
    outputs=[
        gr.Image(type="numpy", label="Processed Image (for image uploads)"),
        gr.Gallery(label="Emotion Distribution Graphs (for video uploads)", columns=3)
    ],
    title="Face Emotion Recognition",
    description="Upload an image or video to analyze emotions. For images, the result will show detected faces with emotions. For videos, it will provide graphs of emotion distribution."
)

# Launch the Gradio interface
if __name__ == "__main__":
    iface.launch()
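
# Note: a minimal sketch of an alternative launch, assuming deployment needs a
# public link and request queueing. `queue()` and `share=True` are standard
# Gradio options, but whether they are wanted here is an assumption:
#
#     if __name__ == "__main__":
#         iface.queue().launch(share=True)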