from typing import Dict

from .model_manager import ModelManager
from .audio_processor import AudioProcessor


class Analyzer:
    """Runs the voice-analysis pipeline: transcription, emotion scoring, and mental health indicators."""

    def __init__(self, model_manager: ModelManager, audio_processor: AudioProcessor):
        self.model_manager = model_manager
        self.audio_processor = audio_processor
        self.model_manager.load_models()

    def analyze(self, audio_path: str) -> Dict:
        """Analyze a single audio file and return a combined report."""
        # Process audio into a waveform and acoustic features
        waveform, features = self.audio_processor.process_audio(audio_path)

        # Get transcription
        transcription = self.model_manager.transcribe(waveform)

        # Analyze emotions
        emotions = self.model_manager.analyze_emotions(transcription)

        # Analyze mental health indicators
        mental_health = self.model_manager.analyze_mental_health(transcription)

        # Combine text-based analysis with audio features
        mental_health = self._combine_analysis(mental_health, features)

        return {
            'transcription': transcription,
            'emotions': {
                'scores': emotions,
                'dominant_emotion': max(emotions.items(), key=lambda x: x[1])[0]
            },
            'mental_health_indicators': mental_health,
            'audio_features': features
        }

    def _combine_analysis(self, mental_health: Dict, features: Dict) -> Dict:
        """Combine mental health analysis with audio features."""
        # Adjust risk scores based on audio features
        energy_level = features['energy']['mean']
        pitch_variability = features['pitch']['std']

        # Simple weighted adjustment of the text-based risk scores
        mental_health['depression_risk'] = (
            mental_health['depression_risk'] * 0.7 +
            (1 - energy_level) * 0.3  # Lower energy may indicate depression
        )
        mental_health['anxiety_risk'] = (
            mental_health['anxiety_risk'] * 0.7 +
            pitch_variability * 0.3  # Higher pitch variability may indicate anxiety
        )

        # Add confidence scores
        mental_health['confidence'] = {
            'depression': 0.8,  # Example confidence scores
            'anxiety': 0.8,
            'stress': 0.7
        }

        return mental_health
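

# Illustrative usage sketch, not part of the original upload. It assumes the
# package is executed as a module (e.g. `python -m <package>.analyzer`) so the
# relative imports above resolve, that ModelManager and AudioProcessor take no
# constructor arguments, and that 'sample.wav' is a placeholder audio path.
if __name__ == '__main__':
    analyzer = Analyzer(ModelManager(), AudioProcessor())
    results = analyzer.analyze('sample.wav')

    print(results['transcription'])
    print(results['emotions']['dominant_emotion'])
    print(results['mental_health_indicators'])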