import os
import re
from collections import defaultdict

import pandas as pd


def analyze_fif_paths(root_dir="split_fifs"):
""" |
|
Extract .fif file paths using regex and provide detailed analysis including emotion types. |
|
""" |
|
|
|
    # Map each stimulus video type (as it appears in the filename) to an
    # emotion label and numeric ID. Utility recordings (label/rating/navon)
    # get id -1 and are excluded from the emotion statistics.
    emotion_mapping = {
        # 0: neutral
        'neutralVideo': {'id': 0, 'emotion': 'neutral'},

        # 1: excited
        'ratatChaseScene': {'id': 1, 'emotion': 'excited'},
        'Kenmiles': {'id': 1, 'emotion': 'excited'},
        'NBA': {'id': 1, 'emotion': 'excited'},

        # 2: happy
        'scooby': {'id': 2, 'emotion': 'happy'},

        # 3: motivated
        'motivationalAuthor': {'id': 3, 'emotion': 'motivated'},

        # 4: relaxed
        'waterfall': {'id': 4, 'emotion': 'relaxed'},
        'asmr': {'id': 4, 'emotion': 'relaxed'},
        'meditation': {'id': 4, 'emotion': 'relaxed'},

        # 5: sad
        'saddogs': {'id': 5, 'emotion': 'sad'},
        'sadbaby': {'id': 5, 'emotion': 'sad'},
        'ChampDeath': {'id': 5, 'emotion': 'sad'},
        'sadBaby1': {'id': 5, 'emotion': 'sad'},

        # 6: horror
        'conjuring': {'id': 6, 'emotion': 'horror'},

        # 7: angry
        'angrydogs': {'id': 7, 'emotion': 'angry'},
        'thepiano': {'id': 7, 'emotion': 'angry'},

        # 8: disgusted
        'trainspotting': {'id': 8, 'emotion': 'disgusted'},

        # Utility recordings (not emotion stimuli)
        'label': {'id': -1, 'emotion': 'utility'},
        'rating': {'id': -1, 'emotion': 'utility'},
        'navon': {'id': -1, 'emotion': 'utility'},
    }

    analysis = {
        'total_files': 0,
        'directory_counts': defaultdict(int),
        'subject_counts': defaultdict(int),
        'epoch_counts': defaultdict(int),
        'video_type_counts': defaultdict(int),
        'emotion_counts': defaultdict(int),
        'emotion_id_counts': defaultdict(int)
    }

    file_data = []

    # Filename pattern: <subject>-mapped_epoch_<n>_<videoType>raw_interval_<k>.raw.fif
    pattern = r'(\d+|Zacker)-mapped_epoch_(\d+)_(\w+)raw_interval_(\d+)\.raw\.fif$'
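    # A hypothetical example of how the pattern decomposes (illustrative
    # name only; the actual recordings may differ):
    #   "12-mapped_epoch_3_scoobyraw_interval_0.raw.fif"
    #     group 1 -> "12"      subject ID (digits, or the literal "Zacker")
    #     group 2 -> "3"       epoch number
    #     group 3 -> "scooby"  video type (looked up in emotion_mapping)
    #     group 4 -> "0"       interval index (matched but never stored)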

    # Walk the tree and tally every file whose name matches the pattern.
    for dirpath, _dirnames, filenames in os.walk(root_dir):
        for filename in filenames:
            match = re.match(pattern, filename)
            if not match:
                continue

            full_path = os.path.join(dirpath, filename)
            subject_id = match.group(1)
            epoch_num = match.group(2)
            video_type = match.group(3)

            # Unrecognized video types fall back to 'unknown' with id -1.
            emotion_info = emotion_mapping.get(video_type, {'id': -1, 'emotion': 'unknown'})
            emotion_id = emotion_info['id']
            emotion = emotion_info['emotion']

            analysis['total_files'] += 1
            analysis['directory_counts'][dirpath] += 1
            analysis['subject_counts'][subject_id] += 1
            analysis['epoch_counts'][epoch_num] += 1
            analysis['video_type_counts'][video_type] += 1
            # Count only true emotion stimuli (excludes utility and unknown,
            # matching the emotion_files.csv filter below).
            if emotion_id >= 0:
                analysis['emotion_counts'][emotion] += 1
                analysis['emotion_id_counts'][emotion_id] += 1

            file_data.append({
                'file_path': full_path,
                'subject_id': subject_id,
                'epoch': int(epoch_num),
                'video_type': video_type,
                'emotion_id': emotion_id,
                'emotion': emotion
            })

    df = pd.DataFrame(file_data)
    if not df.empty:
        # Stable ordering for the CSV outputs.
        df = df.sort_values(['subject_id', 'epoch', 'file_path'])

    # Full listing of every matched file (name matches the report below).
    output_file = 'fif_file_analysis.csv'
    df.to_csv(output_file, index=False)

    # Emotion-only subset (drops utility and unknown files).
    emotion_df = df[df['emotion_id'] >= 0].copy() if not df.empty else df.copy()
    emotion_output_file = 'emotion_files.csv'
    emotion_df.to_csv(emotion_output_file, index=False)

    print_analysis(analysis, df)

    return df, analysis
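
# Note: this script only indexes the files; downstream consumers would
# typically open each matched path with MNE-Python, e.g. (a sketch,
# assuming the mne package is installed):
#
#   import mne
#   raw = mne.io.read_raw_fif(fif_path, preload=True)  # fif_path: a value from df['file_path']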


def print_analysis(analysis, df):
    """Print the detailed report for the matched .fif files."""

    print("\n" + "=" * 50)
    print("FIF FILES ANALYSIS REPORT")
    print("=" * 50)

print("\n1. OVERALL STATISTICS") |
|
print("-"*30) |
|
print(f"Total .fif files found: {analysis['total_files']}") |
|
print(f"Number of subjects: {len(analysis['subject_counts'])}") |
|
print(f"Number of directories: {len(analysis['directory_counts'])}") |
|
|
|
|
|
print("\n2. FILES PER SUBJECT") |
|
print("-"*30) |
|
for subject, count in sorted(analysis['subject_counts'].items()): |
|
print(f"Subject {subject}: {count} files") |
|
|
|
|
|
print("\n3. FILES PER DIRECTORY") |
|
print("-"*30) |
|
for directory, count in sorted(analysis['directory_counts'].items()): |
|
rel_path = os.path.relpath(directory, "splif_fifs") |
|
print(f"{rel_path}: {count} files") |
|
|
|
|
|
print("\n4. FILES PER EMOTION") |
|
print("-"*30) |
|
|
|
ordered_emotions = [ |
|
(0, 'neutral'), |
|
(1, 'excited'), |
|
(2, 'happy'), |
|
(3, 'relaxed'), |
|
(4, 'sad'), |
|
(5, 'horror'), |
|
(6, 'angry'), |
|
(7, 'disgusted') |
|
] |
|
|
|
for emotion_id, emotion_name in ordered_emotions: |
|
if emotion_id in analysis['emotion_id_counts']: |
|
count = analysis['emotion_id_counts'][emotion_id] |
|
print(f"Emotion ID {emotion_id} {emotion_name}: {count} files") |
|
|
|
|
|
utility_count = len(df[df['emotion'] == 'utility']) |
|
if utility_count > 0: |
|
print(f"\nUtility files (rating/label/navon): {utility_count} files") |
|
|
|
|
|
print("\n5. FILES PER VIDEO TYPE") |
|
print("-"*30) |
|
for video_type, count in sorted(analysis['video_type_counts'].items()): |
|
print(f"{video_type}: {count} files") |
|
|
|
|
|
print("\n6. FILES PER EPOCH") |
|
print("-"*30) |
|
for epoch, count in sorted(analysis['epoch_counts'].items(), key=lambda x: int(x[0])): |
|
print(f"Epoch {epoch}: {count} files") |
|
|
|
|
|
print("\n7. CSV FILE OUTPUTS") |
|
print("-"*30) |
|
print("1. Full analysis file: fif_file_analysis.csv") |
|
print("2. Emotion-only file: emotion_files.csv") |
|
print("\nColumns:") |
|
for col in df.columns: |
|
print(f"- {col}") |
|
|
|
|
|
print("\n8. ADDITIONAL STATISTICS") |
|
print("-"*30) |
|
if analysis['total_files'] > 0: |
|
avg_files_per_dir = analysis['total_files'] / len(analysis['directory_counts']) |
|
avg_files_per_subject = analysis['total_files'] / len(analysis['subject_counts']) |
|
print(f"Average files per directory: {avg_files_per_dir:.2f}") |
|
print(f"Average files per subject: {avg_files_per_subject:.2f}") |
|
|
|
|
|
emotion_files = sum(analysis['emotion_id_counts'].values()) |
|
utility_files = utility_count |
|
print(f"\nEmotion files: {emotion_files} ({(emotion_files/analysis['total_files']*100):.1f}%)") |
|
print(f"Utility files: {utility_files} ({(utility_files/analysis['total_files']*100):.1f}%)") |
|
|
|


if __name__ == "__main__":
    df, analysis = analyze_fif_paths()
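
    # Example follow-up (a sketch, not part of the original report): a quick
    # per-subject emotion breakdown from the returned DataFrame.
    if not df.empty:
        print(df.groupby(['subject_id', 'emotion']).size())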