import gradio as gr
import os
from predict import predict_healing_music
import train_model
import logging
import tempfile
import time
import shutil
import socket
import joblib
# Set up logging
# Configure the root logger once at import time; every module logger
# (including the one below) inherits this level and format.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
# Module-level logger named after this module, per stdlib convention.
logger = logging.getLogger(__name__)
def find_free_port(start_port=7860, max_port=7960):
    """Scan [start_port, max_port] and return the first TCP port that can
    be bound on all interfaces, or None when every port is occupied."""
    candidate = start_port
    while candidate <= max_port:
        probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            try:
                probe.bind(('', candidate))
            except OSError:
                # Port busy (or not permitted) — try the next one.
                candidate += 1
                continue
            return candidate
        finally:
            probe.close()
    return None
# Ensure model directory exists
# Resolve the models directory relative to this file so the app works no
# matter what the current working directory is.
model_dir = os.path.join(os.path.dirname(__file__), "models")
os.makedirs(model_dir, exist_ok=True)

# Model file paths (joblib-serialized classifier and feature scaler).
model_path = os.path.join(model_dir, "model.joblib")
scaler_path = os.path.join(model_dir, "scaler.joblib")

# Check if model exists — if either artifact is missing, train from scratch.
# NOTE(review): this runs at import time and blocks until training finishes.
if not os.path.exists(model_path) or not os.path.exists(scaler_path):
    print('First run: Training the model...')
    try:
        train_model.train_and_evaluate_model()
        print('Model training completed!')
    except Exception as e:
        print(f'Model training failed: {str(e)}')
        # Re-raise: the app cannot serve predictions without a model.
        raise e
def process_audio(audio_path):
    """
    Process and analyze the audio file.

    Args:
        audio_path: Filesystem path to the uploaded audio file, or None.

    Returns:
        A 4-tuple. On success: (percentage_text, styled_description_html,
        None, None). On failure: ("Error", error_message, None, None).
        When audio_path is None: (None, None, None, prompt_message).
    """
    if audio_path is None:
        return None, None, None, "Please upload an audio file"

    model_dir = os.path.join(os.path.dirname(__file__), "models")
    model_path = os.path.join(model_dir, "model.joblib")
    scaler_path = os.path.join(model_dir, "scaler.joblib")
    try:
        # Load model and scaler.
        # NOTE(review): neither object is used below — predict_healing_music
        # presumably loads its own copies; this load only validates that the
        # artifacts exist and deserialize. Confirm whether it can be removed.
        model = joblib.load(model_path)
        scaler = joblib.load(scaler_path)

        tmp_file = None
        try:
            # Work on a temporary copy so the analysis never touches the
            # original upload; keep the extension so format detection works.
            suffix = os.path.splitext(audio_path)[1]
            tmp_file = tempfile.NamedTemporaryFile(delete=False, suffix=suffix)
            shutil.copy2(audio_path, tmp_file.name)

            # Make prediction (probability in [0, 1], or None on failure).
            healing_probability = predict_healing_music(tmp_file.name)
            if healing_probability is not None:
                # Calculate percentage.
                healing_percentage = healing_probability * 100

                # Generate description and an accent color per score band.
                if healing_percentage >= 75:
                    description = "This music has strong healing properties! ✨"
                    color = "#15803d"  # Dark green
                elif healing_percentage >= 50:
                    description = "This music has moderate healing effects. 🌟"
                    color = "#0369a1"  # Dark blue
                else:
                    description = "This music has limited healing potential. 🎵"
                    color = "#b91c1c"  # Dark red

                # BUG FIX: the original contained an unterminated multi-line
                # f-string here (a syntax error — its HTML markup appears to
                # have been stripped). Reconstructed as a colored HTML block
                # using `color`, which was otherwise unused — confirm markup.
                return (
                    f"{healing_percentage:.1f}%",
                    f'<div style="color: {color}">{description}</div>',
                    None,
                    None,
                )
            else:
                return "Error", "Error analyzing file. Please ensure it's a valid MP3 or WAV file.", None, None
        except Exception as e:
            logger.error(f"Error during analysis: {str(e)}")
            return "Error", f"An unexpected error occurred: {str(e)}", None, None
        finally:
            # Clean up temporary file in every path, including exceptions.
            if tmp_file is not None:
                try:
                    tmp_file.close()
                    os.unlink(tmp_file.name)
                except Exception as e:
                    logger.error(f"Failed to clean up temporary file: {str(e)}")
    except Exception as e:
        logger.error(f"Error during model loading: {str(e)}")
        return "Error", f"An unexpected error occurred: {str(e)}", None, None
def analyze_audio(audio):
    """Generator that streams UI updates while analyzing an uploaded file.

    Yields lists of four outputs, in the order wired up in the interface:
    [results-column update, analyzing-column update, healing_index text,
    result_text markdown].
    """
    try:
        if audio is None:
            # BUG FIX: the original used `return [...]` here. Inside a
            # generator, a `return <value>` discards the value and simply
            # stops iteration, so the UI would never receive this update.
            yield [
                gr.update(visible=False),  # results
                gr.update(visible=False),  # analyzing
                "",                        # healing_index
                ""                         # result_text
            ]
            return

        # Show analyzing status first.
        yield [
            gr.update(visible=False),  # results
            gr.update(visible=True),   # analyzing
            "",                        # healing_index
            ""                         # result_text
        ]

        # Process audio and get results.
        index, desc, _, _ = process_audio(audio)
        # BUG FIX: the original had an unterminated f-string here (a syntax
        # error — its markup was stripped). Reconstructed as plain Markdown;
        # confirm the originally intended HTML, if any.
        desc_with_hint = f"{desc}\n\nTo analyze another file, please refresh the page"

        # Return final results.
        yield [
            gr.update(visible=True),   # results
            gr.update(visible=False),  # analyzing
            index,                     # healing_index
            desc_with_hint             # result_text
        ]
    except Exception as e:
        logger.error(f"Error in analyze_audio: {str(e)}")
        yield [
            gr.update(visible=True),   # results
            gr.update(visible=False),  # analyzing
            "Error",                   # healing_index
            f"An error occurred: {str(e)}"  # result_text
        ]
# Custom CSS styles injected into the Gradio page: dark theme, centered
# 800px layout, and styling for the upload box, "analyzing" banner, and
# the results card (class names referenced via elem_classes below).
custom_css = """
.gradio-container {
font-family: 'Inter', -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, sans-serif;
max-width: 800px !important;
margin: auto;
padding: 0 1rem;
background-color: #0f1117;
}
.container {
max-width: 700px;
margin: 0 auto;
padding-top: 2rem;
}
.header {
text-align: center;
margin-bottom: 1.5rem;
width: 100%;
display: flex;
justify-content: center;
align-items: center;
}
.title {
font-size: 2.8rem !important;
font-weight: 800 !important;
color: #ffffff !important;
margin: 0 !important;
line-height: 1.2 !important;
text-align: center !important;
letter-spacing: 0.05em !important;
}
.subtitle {
font-size: 1.4rem !important;
text-align: center;
color: #ffffff !important;
margin-top: 1rem !important;
max-width: 800px;
margin-left: auto;
margin-right: auto;
white-space: nowrap !important;
font-weight: 500 !important;
letter-spacing: 0.02em !important;
}
.upload-box {
background-color: #1f2937;
border-radius: 12px;
padding: 2rem;
margin-bottom: 1rem;
border: 2px dashed #6b7280;
transition: all 0.3s ease;
box-shadow: 0 4px 6px -1px rgba(0, 0, 0, 0.1);
}
.upload-area {
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
gap: 1rem;
padding: 1.5rem 0;
}
.icon-text-container {
color: #ffffff;
font-size: 1.3rem !important;
font-weight: 600 !important;
letter-spacing: 0.02em !important;
}
.upload-hint {
color: #ffffff !important;
font-size: 1rem !important;
margin-top: 0.5rem !important;
font-style: italic !important;
font-weight: 500 !important;
}
.analyzing-status {
margin: 1rem 0;
background-color: #1f2937;
border-radius: 12px;
padding: 1.5rem;
text-align: center;
box-shadow: 0 4px 6px -1px rgba(0, 0, 0, 0.1);
}
.analyzing-text {
color: #ffffff !important;
font-size: 1.4rem !important;
font-weight: 600 !important;
margin: 0 !important;
letter-spacing: 0.02em !important;
}
.results-container {
background-color: #1f2937;
border-radius: 12px;
padding: 1.5rem;
margin-top: 1rem;
box-shadow: 0 4px 6px -1px rgba(0, 0, 0, 0.1);
}
.result-title {
color: #ffffff !important;
font-size: 1.6rem !important;
font-weight: 700 !important;
margin-bottom: 1rem !important;
letter-spacing: 0.02em !important;
}
.healing-index {
font-size: 3rem !important;
font-weight: 800 !important;
text-align: center;
color: #ffffff !important;
margin: 1rem 0 !important;
letter-spacing: 0.05em !important;
}
.result-text {
color: #ffffff !important;
font-size: 1.2rem !important;
font-weight: 600 !important;
letter-spacing: 0.02em !important;
line-height: 1.5 !important;
padding: 1rem !important;
border-radius: 8px !important;
}
"""
# Create Gradio interface
# NOTE(review): SOURCE indentation was lost; the nesting below is
# reconstructed from the CSS class hierarchy — confirm against the
# original layout.
with gr.Blocks(
    title="Healing Music Classifier",
    css=custom_css,
    theme=gr.themes.Default()
) as demo:
    with gr.Column(elem_classes="container"):
        # Page header: centered title, subtitle below it.
        with gr.Row(elem_classes="header"):
            gr.Markdown("🎵 Healing Music Classifier", elem_classes="title")
        gr.Markdown(
            "Upload your music file, and our model will analyze its healing potential!",
            elem_classes="subtitle"
        )
        # Upload card.
        with gr.Column(elem_classes="upload-box"):
            with gr.Column(elem_classes="upload-area"):
                gr.Markdown("☁️ Drop your audio file here", elem_classes="icon-text-container")
                audio_input = gr.Audio(
                    label="Audio Input",
                    sources=["upload"],
                    type="filepath",
                    elem_classes="audio-input",
                    interactive=True,
                    # BUG FIX: gr.Audio has no `label_visible` parameter
                    # (passing it raises TypeError at startup); the supported
                    # flag for hiding the label is `show_label`.
                    show_label=False
                )
                gr.Markdown("Limit 200MB per file • MP3, WAV", elem_classes="upload-hint")
        # Transient "analyzing" banner, hidden until an upload starts.
        with gr.Column(elem_classes="analyzing-status", visible=False) as analyzing:
            gr.Markdown(
                """""",
                elem_classes="analyzing-text"
            )
        # Results card, hidden until analysis completes.
        with gr.Column(elem_classes="results-container", visible=False) as results:
            gr.Markdown("Analysis Results", elem_classes="result-title")
            healing_index = gr.Markdown("", elem_classes="healing-index")
            result_text = gr.Markdown("", elem_classes="result-text")

    # Audio analysis event: analyze_audio is a generator, so queue=True lets
    # it stream intermediate UI updates.
    audio_input.upload(
        fn=analyze_audio,
        inputs=[audio_input],
        outputs=[
            results,
            analyzing,
            healing_index,
            result_text
        ],
        queue=True
    )

# Enable queue for the entire app
demo.queue()

# Launch application
# NOTE(review): find_free_port() is defined above but never used; consider
# demo.launch(server_port=find_free_port()) if the default port may be busy.
if __name__ == "__main__":
    demo.launch()