# asr_malayalam/gpu_processing.py
import os
import gc  # for garbage collection

import pandas as pd
import torch
from torch.amp import autocast
from datasets import Dataset, DatasetDict, load_dataset
from datasets.features import Audio
from transformers import WhisperFeatureExtractor, WhisperTokenizer, WhisperProcessor
# Function to load the custom dataset from paired .wav and .txt files
def load_custom_dataset(data_dir):
    data = {
        "audio": [],
        "text": []
    }
    wav_dir = os.path.join(data_dir, 'wav')
    txt_dir = os.path.join(data_dir, 'transcription')
    # Assumes each file in 'wav' has a same-named .txt file in 'transcription'
    for wav_file in os.listdir(wav_dir):
        if wav_file.endswith('.wav'):
            txt_file = wav_file.replace('.wav', '.txt')
            wav_path = os.path.join(wav_dir, wav_file)
            txt_path = os.path.join(txt_dir, txt_file)
            # Read the transcription text
            with open(txt_path, 'r', encoding='utf-8') as f:
                transcription = f.read().strip()
            # Append to the dataset
            data["audio"].append(wav_path)
            data["text"].append(transcription)
    # Create a pandas DataFrame
    df = pd.DataFrame(data)
    # Convert to a Hugging Face dataset
    dataset = Dataset.from_pandas(df)
    # Define the audio feature; Whisper expects 16 kHz input
    dataset = dataset.cast_column("audio", Audio(sampling_rate=16_000))
    return dataset
# Load your custom dataset
custom_train_dataset = load_custom_dataset("./")
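# Optional sanity check (a small sketch, assuming at least one .wav/.txt pair
# was found under ./wav and ./transcription): indexing an example also
# decodes its audio, so failures here surface early.
print(f"Custom train examples: {len(custom_train_dataset)}")
print(custom_train_dataset[0]["text"][:80])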
# Load the Common Voice test set (Malayalam)
common_voice_test = load_dataset("mozilla-foundation/common_voice_11_0", "ml", split="test", trust_remote_code=True)
common_voice_test = common_voice_test.select_columns(["audio", "sentence"])
# Rename "sentence" to "text" so both splits share the column name that prepare_dataset expects
common_voice_test = common_voice_test.rename_column("sentence", "text")
# Combine them into a DatasetDict
dataset_dict = DatasetDict({
    "train": custom_train_dataset,
    "test": common_voice_test,
})
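# Optional check: after the rename above, both splits should expose the same
# "audio" and "text" columns, which the preprocessing below relies on.
for split_name, split_ds in dataset_dict.items():
    print(split_name, len(split_ds), split_ds.column_names)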
# Load the Whisper feature extractor, tokenizer, and processor
feature_extractor = WhisperFeatureExtractor.from_pretrained("openai/whisper-small")
tokenizer = WhisperTokenizer.from_pretrained("openai/whisper-small", language="Malayalam", task="transcribe")
processor = WhisperProcessor.from_pretrained("openai/whisper-small", language="Malayalam", task="transcribe")
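# Optional round-trip check on the tokenizer (a sketch using the first custom
# example): encoding and then decoding a transcription should reproduce it,
# confirming the Malayalam text survives tokenization.
_sample = custom_train_dataset[0]["text"]
_ids = tokenizer(_sample).input_ids
print("Round-trip OK:", tokenizer.decode(_ids, skip_special_tokens=True).strip() == _sample.strip())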
# Check if GPU is available
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using device: {device}")
# Function to prepare the dataset (with optional GPU mixed precision)
def prepare_dataset(batch):
    audio_arrays = [item["array"] for item in batch["audio"]]
    sampling_rates = [item["sampling_rate"] for item in batch["audio"]]
    features = []
    for audio, sr in zip(audio_arrays, sampling_rates):
        # Move the audio to the selected device as a tensor
        audio_tensor = torch.tensor(audio).to(device)
        # Use autocast for mixed precision on whichever device is available
        with autocast(device):
            # The feature extractor expects NumPy input, so convert back on the CPU
            audio_tensor_cpu = audio_tensor.cpu().numpy()
            feature = feature_extractor(audio_tensor_cpu, sampling_rate=sr).input_features[0]
        features.append(feature)
        # Free the tensor before the next iteration
        del audio_tensor
    # Collect garbage once per batch rather than once per clip
    gc.collect()
    batch["input_features"] = features
    batch["labels"] = [tokenizer(text).input_ids for text in batch["text"]]
    return batch
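# Optional smoke test before the full map (a sketch): a Dataset slice like
# dataset_dict["train"][:2] yields the same dict-of-lists format that batched
# map passes in, so shape or column errors surface cheaply here.
_probe = prepare_dataset(dict(dataset_dict["train"][:2]))
print(len(_probe["input_features"]), len(_probe["labels"]))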
# Use Dataset.map to apply the function to both splits
dataset_dict = dataset_dict.map(
    prepare_dataset,
    remove_columns=dataset_dict.column_names["train"],
    batch_size=1024,  # Examples per batch; lower this if memory runs out
    batched=True,     # Enable batched processing
)
# Save the processed dataset to disk
dataset_dict.save_to_disk("processed_dataset")
# Check a sample from the train set
print(dataset_dict['train'][0])
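# To reuse the processed features later (e.g., in a separate training script),
# reload them from the same path; after the map above, the splits contain
# only "input_features" and "labels".
from datasets import load_from_disk
reloaded = load_from_disk("processed_dataset")
print(reloaded)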