Datasets:
Delete alphanumeric-audio-dataset.py
Browse files
alphanumeric-audio-dataset.py
DELETED
@@ -1,75 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
import pandas as pd
|
3 |
-
from datasets import Dataset, Audio
|
4 |
-
|
5 |
-
def load_metadata(metadata_path):
    """Read the metadata CSV at *metadata_path* into a pandas DataFrame."""
    metadata = pd.read_csv(metadata_path)
    return metadata
|
8 |
-
|
9 |
-
def generate_dataset_dict(metadata, audio_folder):
    """Map metadata rows to their audio recordings and collect dataset columns.

    Each unique ``Response_ID`` in *metadata* is expected to contribute three
    recording rows (order within the group: 0 = name, 1 = address, 2 = number).
    Groups with fewer than three rows are skipped instead of raising
    ``IndexError``, which the previous unconditional ``.iloc[2]`` did.

    Args:
        metadata: DataFrame with one row per recording, carrying
            ``Response_ID``, ``file_name``, and the speaker-profile columns
            read below (``Name``, ``Age``, ``Gender``, ...).
        audio_folder: Folder that holds the audio files.
            NOTE(review): currently unused — ``file_name`` values are stored
            as-is; confirm whether they should be joined with this folder.

    Returns:
        dict of column name -> parallel list, one entry per complete speaker.
    """
    data = {
        "file_name_id": [],
        "audio": [],
        "text": [],
        "age": [],
        "gender": [],
        "nationality": [],
        "native_language": [],
        "familiarity_with_english": [],
        "accent_strength": [],
        "difficulties": [],
        "recording_machine": [],
    }

    for response_id in metadata['Response_ID'].unique():
        df_subset = metadata[metadata['Response_ID'] == response_id]

        # Robustness fix: skip speakers with incomplete recordings instead of
        # letting .iloc[2] raise IndexError on a short group.
        if len(df_subset) < 3:
            continue

        # Collect audio paths for the three categories (fixed row order).
        name_audio_path = df_subset['file_name'].iloc[0]
        address_audio_path = df_subset['file_name'].iloc[1]
        number_audio_path = df_subset['file_name'].iloc[2]

        # Speaker-profile fields are taken from the group's first row.
        row = df_subset.iloc[0]

        # Only include data if all three audio paths are truthy.
        if name_audio_path and number_audio_path and address_audio_path:
            data["file_name_id"].append(response_id)
            data["audio"].append({
                "name_audio": name_audio_path,
                "number_audio": number_audio_path,
                "address_audio": address_audio_path,
            })
            data["text"].append({
                "name": row["Name"],
                "number": row["Number"],
                "address": row["Address"],
            })
            data["age"].append(row["Age"])
            data["gender"].append(row["Gender"])
            data["nationality"].append(row["Nationality"])
            data["native_language"].append(row["Native Language"])
            data["familiarity_with_english"].append(row["Familiarity with English"])
            data["accent_strength"].append(row["Accent Strength (Self reported)"])
            data["difficulties"].append(row["Difficulties"])
            data["recording_machine"].append(row["Recording Machine"])

    return data
|
61 |
-
|
62 |
-
def load_dataset(metadata_path="metadata.csv", audio_folder="audio_data"):
    """Build a Hugging Face Dataset from the metadata CSV and audio folder.

    Reads the metadata, groups it per speaker, then casts the "audio" column
    with the Hugging Face Audio feature.
    """
    dataset_dict = generate_dataset_dict(load_metadata(metadata_path), audio_folder)
    dataset = Dataset.from_dict(dataset_dict)
    # NOTE(review): the "audio" column holds a dict of three file names;
    # confirm Audio() accepts that shape rather than a single path string.
    return dataset.cast_column("audio", Audio())
|
71 |
-
|
72 |
-
if __name__ == "__main__":
    # Smoke test: build the dataset with default paths and show its summary.
    print(load_dataset())
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|