import os
from datasets import Dataset, DatasetDict, load_dataset
from datasets.features import Audio
import pandas as pd
import torch
from transformers import WhisperFeatureExtractor, WhisperTokenizer, WhisperProcessor

# Function to load your custom dataset
def load_custom_dataset(data_dir):
    data = {
        "audio": [],
        "text": []
    }

    wav_dir = os.path.join(data_dir, 'wav')
    txt_dir = os.path.join(data_dir, 'transcription')

    # Pair each .wav in 'wav' with the same-named .txt in 'transcription'
    for wav_file in sorted(os.listdir(wav_dir)):
        if wav_file.endswith('.wav'):
            txt_file = wav_file.replace('.wav', '.txt')
            wav_path = os.path.join(wav_dir, wav_file)
            txt_path = os.path.join(txt_dir, txt_file)

            # Read the transcription text
            with open(txt_path, 'r', encoding='utf-8') as f:
                transcription = f.read().strip()

            # Append to the dataset
            data["audio"].append(wav_path)
            data["text"].append(transcription)

    # Create a pandas dataframe
    df = pd.DataFrame(data)

    # Convert to a Hugging Face dataset
    dataset = Dataset.from_pandas(df)

    # Cast the path column to an Audio feature; Whisper models expect 16 kHz
    dataset = dataset.cast_column("audio", Audio(sampling_rate=16_000))

    return dataset
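
# Expected directory layout for the loader above (file names are illustrative):
#   ./wav/sample_001.wav   paired with   ./transcription/sample_001.txt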

# Load your custom dataset
custom_train_dataset = load_custom_dataset("./")

# Load the Common Voice 11 Malayalam test split for evaluation
common_voice_test = load_dataset("mozilla-foundation/common_voice_11_0", "ml", split="test", trust_remote_code=True)

# Keep only the needed columns, rename "sentence" to "text" so both splits
# share the same schema, and resample the audio to the 16 kHz Whisper expects
common_voice_test = common_voice_test.select_columns(["audio", "sentence"])
common_voice_test = common_voice_test.rename_column("sentence", "text")
common_voice_test = common_voice_test.cast_column("audio", Audio(sampling_rate=16_000))

# Combine them into a DatasetDict
dataset_dict = DatasetDict({
    "train": custom_train_dataset,
    "test": common_voice_test
})
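
# Both splits now expose the same "audio" and "text" columns; printing the
# DatasetDict is a cheap way to confirm the schema before heavy processing
print(dataset_dict)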

# Load the Whisper feature extractor, tokenizer, and processor (the processor
# bundles the first two; keeping all three on hand is convenient below)
feature_extractor = WhisperFeatureExtractor.from_pretrained("openai/whisper-small")
tokenizer = WhisperTokenizer.from_pretrained("openai/whisper-small", language="Malayalam", task="transcribe")
processor = WhisperProcessor.from_pretrained("openai/whisper-small", language="Malayalam", task="transcribe")

# Report whether a GPU is available; the feature extraction below is a
# CPU-bound NumPy operation, so this is informational only
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using device: {device}")

# Function to prepare a batch: compute log-mel input features from the audio
# and tokenize the transcription into label ids
def prepare_dataset(batch):
    audio_arrays = [item["array"] for item in batch["audio"]]
    sampling_rates = [item["sampling_rate"] for item in batch["audio"]]

    features = []
    for audio, sr in zip(audio_arrays, sampling_rates):
        # WhisperFeatureExtractor works directly on NumPy arrays on the CPU,
        # so the decoded audio can be passed in as-is
        feature = feature_extractor(audio, sampling_rate=sr).input_features[0]
        features.append(feature)

    batch["input_features"] = features
    batch["labels"] = [tokenizer(text).input_ids for text in batch["text"]]
    return batch

# Apply the preprocessing to both splits, dropping the raw audio/text columns
dataset_dict = dataset_dict.map(
    prepare_dataset,
    remove_columns=dataset_dict.column_names["train"],
    batched=True,   # enable batched processing
    batch_size=16,  # small batches keep the decoded audio arrays from exhausting RAM
)
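
# For whisper-small, each input_features entry is an 80 x 3000 log-mel matrix
# (30 s of 16 kHz audio, padded or truncated), and labels holds the token ids
# for the transcription, including Whisper's special prefix tokens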

# Save the processed dataset to disk
dataset_dict.save_to_disk("processed_dataset")

# Check a sample from the train set
print(dataset_dict['train'][0])
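
# Optional sanity check (a minimal sketch): reload the saved dataset to
# confirm it round-trips from disk before a training script consumes it
from datasets import load_from_disk
reloaded = load_from_disk("processed_dataset")
print(reloaded)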