|
from .model_manager import ModelManager |
|
from .audio_processor import AudioProcessor |
|
from typing import Dict |
|
|
|
class Analyzer:
    """Pipeline that turns an audio file into a transcription plus
    emotion and mental-health indicators.

    Collaborators:
        model_manager:   supplies transcription, emotion and mental-health models.
        audio_processor: extracts the waveform and acoustic features from a file.
    """

    # Weights for blending the text-model risk score with the audio-derived cue.
    # Kept as named constants so the blend is defined in one place.
    _TEXT_WEIGHT = 0.7
    _AUDIO_WEIGHT = 0.3

    def __init__(self, model_manager: ModelManager, audio_processor: AudioProcessor):
        """Store collaborators and eagerly load the underlying models.

        Loading happens here so the first ``analyze()`` call does not pay
        the model start-up cost.
        """
        self.model_manager = model_manager
        self.audio_processor = audio_processor
        self.model_manager.load_models()

    def analyze(self, audio_path: str) -> Dict:
        """Run the full analysis pipeline on one audio file.

        Args:
            audio_path: Path to the audio file to analyze.

        Returns:
            Dict with keys ``transcription``, ``emotions`` (``scores`` plus
            ``dominant_emotion`` — ``None`` if the model returned no scores),
            ``mental_health_indicators`` and ``audio_features``.
        """
        waveform, features = self.audio_processor.process_audio(audio_path)
        transcription = self.model_manager.transcribe(waveform)
        emotions = self.model_manager.analyze_emotions(transcription)
        mental_health = self.model_manager.analyze_mental_health(transcription)

        # Fold acoustic cues into the text-based risk scores.
        mental_health = self._combine_analysis(mental_health, features)

        # max() on an empty dict raises ValueError — guard against a model
        # that returns no emotion scores.
        dominant = max(emotions, key=emotions.get) if emotions else None

        return {
            'transcription': transcription,
            'emotions': {
                'scores': emotions,
                'dominant_emotion': dominant,
            },
            'mental_health_indicators': mental_health,
            'audio_features': features,
        }

    def _combine_analysis(self, mental_health: Dict, features: Dict) -> Dict:
        """Combine mental health analysis with audio features.

        Blends the text-derived ``depression_risk`` / ``anxiety_risk`` with
        acoustic cues (low energy nudges depression risk up; high pitch
        variability nudges anxiety risk up) and attaches static confidence
        estimates.

        Returns:
            A new dict — the caller's ``mental_health`` dict is NOT mutated.
        """
        # Work on a copy: the original implementation mutated the argument
        # in place, surprising any caller that kept a reference to it.
        combined = dict(mental_health)

        # assumes these feature values are normalized to roughly 0-1, so the
        # weighted blend stays in range — TODO confirm against AudioProcessor
        energy_level = features['energy']['mean']
        pitch_variability = features['pitch']['std']

        combined['depression_risk'] = (
            combined['depression_risk'] * self._TEXT_WEIGHT
            + (1 - energy_level) * self._AUDIO_WEIGHT
        )
        combined['anxiety_risk'] = (
            combined['anxiety_risk'] * self._TEXT_WEIGHT
            + pitch_variability * self._AUDIO_WEIGHT
        )

        # Static confidence estimates for the blended scores.
        combined['confidence'] = {
            'depression': 0.8,
            'anxiety': 0.8,
            'stress': 0.7
        }

        return combined