import gc
import os

import pandas as pd
import torch
from datasets import Dataset, DatasetDict, load_dataset
from datasets.features import Audio
from transformers import WhisperFeatureExtractor, WhisperProcessor, WhisperTokenizer


def load_custom_dataset(data_dir):
    """Pair <data_dir>/wav/NAME.wav with <data_dir>/transcription/NAME.txt
    and return a Dataset with "audio" and "text" columns."""
    data = {"audio": [], "text": []}

    wav_dir = os.path.join(data_dir, "wav")
    txt_dir = os.path.join(data_dir, "transcription")

    for wav_file in os.listdir(wav_dir):
        if wav_file.endswith(".wav"):
            txt_file = wav_file.replace(".wav", ".txt")
            wav_path = os.path.join(wav_dir, wav_file)
            txt_path = os.path.join(txt_dir, txt_file)

            # Skip audio files that have no matching transcription.
            if not os.path.exists(txt_path):
                continue

            with open(txt_path, "r", encoding="utf-8") as f:
                transcription = f.read().strip()

            data["audio"].append(wav_path)
            data["text"].append(transcription)

    df = pd.DataFrame(data)
    dataset = Dataset.from_pandas(df)

    # Decode lazily on access and resample to the 16 kHz rate Whisper expects.
    dataset = dataset.cast_column("audio", Audio(sampling_rate=16_000))

    return dataset


custom_train_dataset = load_custom_dataset("./")

common_voice_test = load_dataset(
    "mozilla-foundation/common_voice_11_0", "ml", split="test", trust_remote_code=True
)
common_voice_test = common_voice_test.select_columns(["audio", "sentence"])
# The custom train split calls its transcription column "text" while Common
# Voice calls it "sentence"; rename so prepare_dataset can read batch["text"]
# for both splits.
common_voice_test = common_voice_test.rename_column("sentence", "text")
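
# Quick size check before any heavy processing; nothing is decoded yet.
print(f"train: {len(custom_train_dataset)} examples, test: {len(common_voice_test)} examples")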


dataset_dict = DatasetDict({
    "train": custom_train_dataset,
    "test": common_voice_test,
})
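
# Confirm that both splits expose the same column names before preprocessing.
print(dataset_dict)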


# Load the Whisper components for Malayalam transcription; the processor
# bundles the feature extractor and the tokenizer.
feature_extractor = WhisperFeatureExtractor.from_pretrained("openai/whisper-small")
tokenizer = WhisperTokenizer.from_pretrained("openai/whisper-small", language="Malayalam", task="transcribe")
processor = WhisperProcessor.from_pretrained("openai/whisper-small", language="Malayalam", task="transcribe")
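
# Saving the processor next to the processed features keeps preprocessing and
# any later training run consistent (a sketch; the path is an assumption):
#
#   processor.save_pretrained("processed_dataset_processor")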


# Report the available device. WhisperFeatureExtractor computes features on
# the CPU with NumPy; the GPU only matters once a model is trained on them.
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using device: {device}")


def prepare_dataset(batch):
    # The audio column was cast with Audio(sampling_rate=16_000), so each
    # decoded array arrives already resampled to 16 kHz.
    audio_arrays = [item["array"] for item in batch["audio"]]
    sampling_rates = [item["sampling_rate"] for item in batch["audio"]]

    # Compute log-Mel input features. The feature extractor operates on NumPy
    # arrays on the CPU, so there is nothing to gain from moving the audio to
    # a CUDA tensor here.
    features = []
    for audio, sr in zip(audio_arrays, sampling_rates):
        feature = feature_extractor(audio, sampling_rate=sr).input_features[0]
        features.append(feature)

    # Encourage Python to release decoded audio buffers between the large
    # (batch_size=1024) batches.
    gc.collect()

    batch["input_features"] = features
    batch["labels"] = [tokenizer(text).input_ids for text in batch["text"]]
    return batch
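

# A quick smoke test of prepare_dataset on one example before mapping the
# whole dataset; the [:1] slice keeps the batched dict-of-lists layout the
# function expects (a sketch, safe to delete):
#
#   sample = prepare_dataset(dataset_dict["train"][:1])
#   print(len(sample["input_features"][0]))  # 80 log-Mel bins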


# remove_columns drops the raw "audio" and "text" columns once they have been
# converted; both splits share these names, so the train column list covers
# the test split as well.
dataset_dict = dataset_dict.map(
    prepare_dataset,
    remove_columns=dataset_dict.column_names["train"],
    batched=True,
    batch_size=1024,
)
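
# Optional: Whisper's decoder has a 448-token maximum target length, so
# examples whose labels exceed it are usually dropped before training.
# A minimal sketch, assuming that standard limit:
MAX_LABEL_LENGTH = 448
dataset_dict = dataset_dict.filter(
    lambda labels: len(labels) <= MAX_LABEL_LENGTH,
    input_columns=["labels"],
)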


dataset_dict.save_to_disk("processed_dataset")
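
# The processed features can be reloaded later from a separate training
# script without re-running this file, e.g.:
#
#   from datasets import load_from_disk
#   dataset_dict = load_from_disk("processed_dataset")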

# Spot-check one processed example.
print(dataset_dict["train"][0])