bci-dataset-v4 / make7.py
import os
import pandas as pd
import re
from collections import defaultdict
def analyze_fif_paths(root_dir="split_fifs"):
    """
    Extract .fif file paths using regex and provide detailed analysis including emotion types.
    """
    # Define emotion mapping with corrected IDs
    emotion_mapping = {
        # Neutral (ID: 0)
        'neutralVideo': {'id': 0, 'emotion': 'neutral'},
        # Excited (ID: 1)
        'ratatChaseScene': {'id': 1, 'emotion': 'excited'},
        'Kenmiles': {'id': 1, 'emotion': 'excited'},
        'NBA': {'id': 1, 'emotion': 'excited'},
        # Happy (ID: 2)
        'scooby': {'id': 2, 'emotion': 'happy'},
        # Motivated (ID: 3)
        'motivationalAuthor': {'id': 3, 'emotion': 'motivated'},
        # Relaxed (ID: 4)
        'waterfall': {'id': 4, 'emotion': 'relaxed'},
        'asmr': {'id': 4, 'emotion': 'relaxed'},
        'meditation': {'id': 4, 'emotion': 'relaxed'},
        # Sad (ID: 5)
        'saddogs': {'id': 5, 'emotion': 'sad'},
        'sadbaby': {'id': 5, 'emotion': 'sad'},
        'ChampDeath': {'id': 5, 'emotion': 'sad'},
        'sadBaby1': {'id': 5, 'emotion': 'sad'},
        # Horror (ID: 6)
        'conjuring': {'id': 6, 'emotion': 'horror'},
        # Angry (ID: 7)
        'angrydogs': {'id': 7, 'emotion': 'angry'},
        'thepiano': {'id': 7, 'emotion': 'angry'},
        # Disgusted (ID: 8)
        'trainspotting': {'id': 8, 'emotion': 'disgusted'},
        # Utility files (no emotion)
        'label': {'id': -1, 'emotion': 'utility'},
        'rating': {'id': -1, 'emotion': 'utility'},
        'navon': {'id': -1, 'emotion': 'utility'}
    }
    # Dictionary to store analysis data
    analysis = {
        'total_files': 0,
        'directory_counts': defaultdict(int),
        'subject_counts': defaultdict(int),
        'epoch_counts': defaultdict(int),
        'video_type_counts': defaultdict(int),
        'emotion_counts': defaultdict(int),
        'emotion_id_counts': defaultdict(int)
    }
    # List to store file paths
    file_data = []
    # Regex pattern for .fif files, i.e. filenames of the form
    # <subject>-mapped_epoch_<epoch>_<videoType>raw_interval_<n>.raw.fif
    pattern = r'(\d+|Zacker)-mapped_epoch_(\d+)_(\w+)raw_interval_(\d+)\.raw\.fif$'
    # Walk through the directory
    for dirpath, dirnames, filenames in os.walk(root_dir):
        for filename in filenames:
            match = re.match(pattern, filename)
            if match:
                full_path = os.path.join(dirpath, filename)
                # Extract components from filename
                subject_id = match.group(1)
                epoch_num = match.group(2)
                video_type = match.group(3)
                # Get emotion data if video type is in mapping
                emotion_info = emotion_mapping.get(video_type, {'id': -1, 'emotion': 'unknown'})
                emotion_id = emotion_info['id']
                emotion = emotion_info['emotion']
                # Update counts
                analysis['total_files'] += 1
                analysis['directory_counts'][dirpath] += 1
                analysis['subject_counts'][subject_id] += 1
                analysis['epoch_counts'][epoch_num] += 1
                analysis['video_type_counts'][video_type] += 1
                if emotion != 'utility':  # Only count actual emotions
                    analysis['emotion_counts'][emotion] += 1
                    analysis['emotion_id_counts'][emotion_id] += 1
                # Add to file data
                file_data.append({
                    'file_path': full_path,
                    'subject_id': subject_id,
                    'epoch': int(epoch_num),
                    'video_type': video_type,
                    'emotion_id': emotion_id,
                    'emotion': emotion
                })
    # Create DataFrame
    df = pd.DataFrame(file_data)
    # Sort the DataFrame
    df = df.sort_values(['subject_id', 'epoch', 'file_path'])
    # Save the full analysis to CSV
    output_file = 'fif_file_analysis4.csv'
    df.to_csv(output_file, index=False)
    # Create a filtered DataFrame with only emotion-related files
    emotion_df = df[df['emotion_id'] >= 0].copy()
    emotion_output_file = 'emotion_files.csv'
    emotion_df.to_csv(emotion_output_file, index=False)
    # Print the analysis
    print_analysis(analysis, df)
    return df, analysis
def print_analysis(analysis, df):
    """Print detailed analysis of the .fif files."""
    print("\n" + "="*50)
    print("FIF FILES ANALYSIS REPORT")
    print("="*50)
    # Overall Statistics
    print("\n1. OVERALL STATISTICS")
    print("-"*30)
    print(f"Total .fif files found: {analysis['total_files']}")
    print(f"Number of subjects: {len(analysis['subject_counts'])}")
    print(f"Number of directories: {len(analysis['directory_counts'])}")
    # Subject Breakdown
    print("\n2. FILES PER SUBJECT")
    print("-"*30)
    for subject, count in sorted(analysis['subject_counts'].items()):
        print(f"Subject {subject}: {count} files")
    # Directory Breakdown
    print("\n3. FILES PER DIRECTORY")
    print("-"*30)
    for directory, count in sorted(analysis['directory_counts'].items()):
        rel_path = os.path.relpath(directory, "split_fifs")
        print(f"{rel_path}: {count} files")
    # Emotion Analysis
    print("\n4. FILES PER EMOTION")
    print("-"*30)
    # Define the exact order of emotions with their IDs (matching emotion_mapping)
    ordered_emotions = [
        (0, 'neutral'),
        (1, 'excited'),
        (2, 'happy'),
        (3, 'motivated'),
        (4, 'relaxed'),
        (5, 'sad'),
        (6, 'horror'),
        (7, 'angry'),
        (8, 'disgusted')
    ]
    for emotion_id, emotion_name in ordered_emotions:
        if emotion_id in analysis['emotion_id_counts']:
            count = analysis['emotion_id_counts'][emotion_id]
            print(f"Emotion ID {emotion_id} {emotion_name}: {count} files")
    # Print utility files separately
    utility_count = len(df[df['emotion'] == 'utility'])
    if utility_count > 0:
        print(f"\nUtility files (rating/label/navon): {utility_count} files")
    # Video Type Analysis
    print("\n5. FILES PER VIDEO TYPE")
    print("-"*30)
    for video_type, count in sorted(analysis['video_type_counts'].items()):
        print(f"{video_type}: {count} files")
    # Epoch Analysis
    print("\n6. FILES PER EPOCH")
    print("-"*30)
    for epoch, count in sorted(analysis['epoch_counts'].items(), key=lambda x: int(x[0])):
        print(f"Epoch {epoch}: {count} files")
    # CSV File Information
    print("\n7. CSV FILE OUTPUTS")
    print("-"*30)
    print("1. Full analysis file: fif_file_analysis4.csv")
    print("2. Emotion-only file: emotion_files.csv")
    print("\nColumns:")
    for col in df.columns:
        print(f"- {col}")
    # Additional Statistics
    print("\n8. ADDITIONAL STATISTICS")
    print("-"*30)
    if analysis['total_files'] > 0:
        avg_files_per_dir = analysis['total_files'] / len(analysis['directory_counts'])
        avg_files_per_subject = analysis['total_files'] / len(analysis['subject_counts'])
        print(f"Average files per directory: {avg_files_per_dir:.2f}")
        print(f"Average files per subject: {avg_files_per_subject:.2f}")
        # Calculate percentages of emotion files
        emotion_files = sum(analysis['emotion_id_counts'].values())
        utility_files = utility_count
        print(f"\nEmotion files: {emotion_files} ({(emotion_files/analysis['total_files']*100):.1f}%)")
        print(f"Utility files: {utility_files} ({(utility_files/analysis['total_files']*100):.1f}%)")
if __name__ == "__main__":
    # Run analysis
    df, analysis = analyze_fif_paths()
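    # --- Optional usage sketch (assumption, not part of the original script:
    # the MNE-Python package is installed and the matched paths point to
    # readable MNE Raw .fif recordings; this only illustrates how one of the
    # listed files could be opened for downstream processing) ---
    emotion_paths = df.loc[df['emotion_id'] >= 0, 'file_path']
    if not emotion_paths.empty:
        try:
            import mne
            example_path = emotion_paths.iloc[0]
            raw = mne.io.read_raw_fif(example_path, preload=False)
            print(f"\nExample recording loaded from {example_path}")
            print(raw.info)
        except ImportError:
            print("\nmne is not installed; skipping the example .fif load")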