# asr_malayalam/optimized_processing.py
import os

import numpy as np
import pandas as pd
from datasets import Dataset, DatasetDict, concatenate_datasets, load_dataset
from datasets.features import Audio
# Load the custom dataset from paired .wav audio and .txt transcription files
def load_custom_dataset(data_dir):
    data = {
        "audio": [],
        "text": [],
    }

    wav_dir = os.path.join(data_dir, 'wav')
    txt_dir = os.path.join(data_dir, 'transcription')

    # Assumes each file in 'wav' has a same-named .txt file in 'transcription'
    for wav_file in os.listdir(wav_dir):
        if wav_file.endswith('.wav'):
            txt_file = wav_file.replace('.wav', '.txt')
            wav_path = os.path.join(wav_dir, wav_file)
            txt_path = os.path.join(txt_dir, txt_file)

            # Read the transcription text
            with open(txt_path, 'r', encoding='utf-8') as f:
                transcription = f.read().strip()

            # Append the audio/text pair to the dataset
            data["audio"].append(wav_path)
            data["text"].append(transcription)

    # Build a pandas DataFrame and convert it to a Hugging Face Dataset
    df = pd.DataFrame(data)
    dataset = Dataset.from_pandas(df)

    # Cast the audio column so .wav files are decoded (and resampled) at 16 kHz,
    # the sampling rate Whisper expects
    dataset = dataset.cast_column("audio", Audio(sampling_rate=16_000))

    return dataset
# Load the custom dataset
custom_train_dataset = load_custom_dataset("./")

# Load the Common Voice test set (Malayalam)
common_voice_test = load_dataset("mozilla-foundation/common_voice_11_0", "ml", split="test", trust_remote_code=True)
common_voice_test = common_voice_test.select_columns(["audio", "sentence"])
# Rename "sentence" to "text" so both splits share the schema prepare_dataset expects
common_voice_test = common_voice_test.rename_column("sentence", "text")

# Combine them into a DatasetDict
dataset_dict = DatasetDict({
    "train": custom_train_dataset,
    "test": common_voice_test,
})

# `dataset_dict` now holds the custom train set and the Common Voice test set
print(dataset_dict)
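
# Illustrative sanity check (not in the original pipeline): after the rename,
# both splits should expose the same columns, so the same prepare_dataset
# function can serve either split.
assert sorted(dataset_dict["train"].column_names) == sorted(dataset_dict["test"].column_names)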
from transformers import WhisperFeatureExtractor
feature_extractor = WhisperFeatureExtractor.from_pretrained("openai/whisper-small")
from transformers import WhisperTokenizer
tokenizer = WhisperTokenizer.from_pretrained("openai/whisper-small", language="Malayalam", task="transcribe")
from transformers import WhisperProcessor
processor = WhisperProcessor.from_pretrained("openai/whisper-small", language="Malayalam", task="transcribe")
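
# Illustrative check (not part of the original pipeline): the processor simply
# bundles the feature extractor and tokenizer above, so a quick encode/decode
# round trip confirms the tokenizer is configured for Malayalam transcription.
# The sample string below is a placeholder, not drawn from the dataset.
_ids = tokenizer("ഒരു ഉദാഹരണം").input_ids
print(tokenizer.decode(_ids, skip_special_tokens=True))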
print(dataset_dict['train'][0])
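# A row should look roughly like this (array and path values elided):
# {'audio': {'path': '....wav', 'array': array([...]), 'sampling_rate': 16000}, 'text': '...'}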
import gc  # for manual garbage collection between samples and batches

def prepare_dataset(batch):
    # Decoded audio arrays and their sampling rates for every item in the batch
    audio_arrays = [item["array"] for item in batch["audio"]]
    sampling_rates = [item["sampling_rate"] for item in batch["audio"]]

    # Extract log-mel input features for each audio sample
    features = []
    for audio, sr in zip(audio_arrays, sampling_rates):
        feature = feature_extractor(audio, sampling_rate=sr).input_features[0]
        # Store as float16 to halve the memory footprint of the features
        feature = np.array(feature, dtype=np.float16)
        features.append(feature)

        # Drop references and collect immediately to keep peak memory low
        del audio
        del sr
        gc.collect()

    batch["input_features"] = features

    # Encode the target text to label ids, padding to the longest text in the batch
    batch["labels"] = tokenizer(batch["text"], padding="longest", truncation=True).input_ids

    return batch
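
# Optional smoke test (illustrative, assumes the first two training rows are
# cheap to decode): run prepare_dataset on a tiny slice to confirm the feature
# shape before committing to the full pass. Whisper's feature extractor emits
# 80 log-mel bins over 3000 frames per 30-second window.
_sample = dataset_dict["train"].select(range(2)).map(
    prepare_dataset,
    batched=True,
    batch_size=2,
    remove_columns=dataset_dict["train"].column_names,
)
print(np.shape(_sample[0]["input_features"]))  # expected: (80, 3000)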
# Process the dataset in fixed-size chunks, saving each processed chunk to disk
def process_and_save_in_batches(dataset, batch_size=1000, save_path="processed_dataset"):
    # Collect the processed chunks so they can optionally be merged at the end
    all_processed = []

    # Loop through the dataset in chunks
    for start_idx in range(0, len(dataset), batch_size):
        # Take the chunk as a Dataset view; select() keeps the Audio feature
        # lazy instead of decoding every file into memory up front
        end_idx = min(start_idx + batch_size, len(dataset))
        batch = dataset.select(range(start_idx, end_idx))

        # Apply the processing function to the chunk
        processed_batch = batch.map(
            prepare_dataset,
            remove_columns=dataset.column_names,
            batched=True,
            batch_size=batch_size,
            num_proc=None,
        )
        print(f"Batch {start_idx} done")

        # Keep the processed chunk for the optional final merge
        all_processed.append(processed_batch)

        # Release the raw chunk and collect before moving on
        del batch
        gc.collect()

        # Save the processed chunk to its own subdirectory
        processed_batch.save_to_disk(os.path.join(save_path, f"batch_{start_idx // batch_size}"))

    # Optionally merge all processed chunks into one dataset at the end
    # (memory-heavy for large datasets; the per-chunk saves above already
    # persist everything)
    final_dataset = concatenate_datasets(all_processed)
    final_dataset.save_to_disk(save_path)
# Process and save the dataset in batches
process_and_save_in_batches(dataset_dict['train'], batch_size=1000, save_path="processed_dataset")
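
# Illustrative round-trip check (not in the original script): the merged
# dataset written above can be reloaded directly from save_path.
from datasets import load_from_disk
reloaded = load_from_disk("processed_dataset")
print(reloaded)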