#!/usr/bin/env python
# coding: utf-8

# # Create embeddings with the transformer library
#
# We use the Hugging Face transformers library to create an embedding for an audio dataset.
#
# ## tl;dr: the pipeline as callable functions
import datasets
from transformers import AutoFeatureExtractor, AutoModel, ASTForAudioClassification
import torch
import pandas as pd
import umap
import numpy as np
_SPLIT = "train"
_MODELNAME = "MIT/ast-finetuned-speech-commands-v2"
def __set_device():
    # prefer the GPU if one is available and clear its cache before use
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if device == "cuda":
        torch.cuda.empty_cache()
    return device
def __permute_model2dataset_probabilities(model_probabilities):
    """
    The model's and the dataset's integer label mappings differ. Therefore, we need to
    permute the model's logits so that they match the dataset's label order.
    Due to the small size of the vector, this is done on the CPU.
    """
    cpu_copy = model_probabilities.to("cpu").detach().numpy()
    # only select model2dataset_truncated values as they map model ints to existing dataset ints
    permuted_vector = np.empty(shape=(len(labels),))
    # Problem: the model has more output classes than the dataset.
    # Hence, only add those outputs that are mapped.
    for key in model2dataset_truncated:
        dataset_int = model2dataset_truncated[key]
        permuted_vector[dataset_int] = cpu_copy[key]
    return permuted_vector
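
# Hypothetical illustration of the permutation (indices are made up): if model2dataset_truncated
# were {0: 3, 1: 0}, the model's logit at index 0 would end up at index 3 of the permuted vector
# and the logit at index 1 at index 0; model outputs without a mapping are simply dropped.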
def batch_probabilities_and_embeddings(model, feature_extractor, classifier):
    device = model.device

    def processing(batch):
        audios = [element["array"] for element in batch["audio"]]
        inputs = feature_extractor(raw_speech=audios, return_tensors="pt", padding=True, sampling_rate=16000).to(device)
        # inference only: no gradients are needed and the returned tensors stay detached
        with torch.no_grad():
            outputs = model(**inputs)
            embeddings = classifier(**inputs).last_hidden_state[:, 0].cpu()
        return {
            "logits": torch.tensor([__permute_model2dataset_probabilities(logit) for logit in outputs.logits]),
            "embedding": embeddings,
        }

    return processing
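
# processing returns, per batch, the permuted logits and the first token of the encoder's
# last hidden state (used here as a pooled embedding); both are picked up as new columns
# by the datasets.map call below.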
def annotate_probabilities_and_embeddings(dataset, modelname, batched=True, batch_size=8):
    # the classification model provides the logits, the bare encoder model the embeddings
    model = ASTForAudioClassification.from_pretrained(modelname)
    feature_extractor = AutoFeatureExtractor.from_pretrained(modelname, padding=True)
    classifier = AutoModel.from_pretrained(modelname, output_hidden_states=True)
    device = __set_device()
    calc_outputs = batch_probabilities_and_embeddings(model.to(device), feature_extractor, classifier.to(device))
    output_dataset = dataset.map(calc_outputs, batched=batched, batch_size=batch_size)
    return output_dataset
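
# Note: batch_size=8 is a conservative default; depending on the available (GPU) memory it
# can be raised to speed up the map call, e.g.
# annotate_probabilities_and_embeddings(dataset, _MODELNAME, batch_size=16).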
def annotate_batch(model, dataset):
    device = model.device

    def batch_annotation(batch):
        logits = [torch.tensor(element) for element in batch["logits"]]
        probabilities_per_class = [torch.nn.functional.softmax(logit, dim=-1) for logit in logits]
        predicted_class_ids = [torch.argmax(logit).item() for logit in logits]
        # logits are already permuted to match the dataset ordering -> no additional work needed
        predicted_labels = [labels[predicted_class_id] for predicted_class_id in predicted_class_ids]
        annotated_labels = [labels[element] for element in batch["label"]]
        probabilities = []
        for index, prob_per_class in enumerate(probabilities_per_class):
            probabilities.append(prob_per_class[predicted_class_ids[index]].item())
        return {
            "label_string": annotated_labels,
            "probability": probabilities,
            "probability_vector": probabilities_per_class,
            "prediction": predicted_class_ids,
            "prediction_string": predicted_labels,
        }

    return batch_annotation
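
# For a batch of N samples, batch_annotation returns lists of length N for the new columns
# label_string, probability, probability_vector, prediction and prediction_string, which
# datasets.map adds to the dataset.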
def annotate_dataset(dataset, modelname, batched=True, batch_size=8):
    model = ASTForAudioClassification.from_pretrained(modelname)
    device = __set_device()
    annotate = annotate_batch(model.to(device), dataset)
    annotated_dataset = dataset.map(annotate, batched=batched, batch_size=batch_size)
    return annotated_dataset
# ## Step-by-step example on speech-commands
#
# ### Load speech-commands from the Hugging Face hub
# Load the split configured in _SPLIT (here "train"); use the validation split instead to
# evaluate the model's performance on unseen data.
dataset = datasets.load_dataset('speech_commands', 'v0.01', split=_SPLIT)
labels = dataset.features["label"].names
# build a dict that maps the dataset's string label to the dataset's int label
label_dict = {label: i for i, label in enumerate(labels)}
model = ASTForAudioClassification.from_pretrained(_MODELNAME)
# look up the label for each model int in label_dict and translate it into the dataset int;
# model classes that do not exist in the dataset are marked with -1
model2dataset_int_conversion = {i: label_dict[model.config.id2label[i]] if model.config.id2label[i] in label_dict.keys()
                                else -1 for i in range(model.config.num_labels)}
model2dataset_truncated = {key: value for key, value in model2dataset_int_conversion.items() if value != -1}
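
# Optional sanity check (illustrative): every mapped model index should point to the
# dataset label with the same name.
for model_int, dataset_int in model2dataset_truncated.items():
    assert model.config.id2label[model_int] == labels[dataset_int]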
# Let's have a look at all of the labels that we want to predict
print(labels)
# ### Compute probabilities and embeddings and annotate dataset
# calculate logits and an embedding for each sample and annotate the dataset
dataset_annotated = annotate_probabilities_and_embeddings(dataset, _MODELNAME)
# now annotate labels and probabilities
dataset_enriched = annotate_dataset(dataset_annotated, _MODELNAME)
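
# Optional spot check (illustrative): compare the model's prediction to the ground-truth
# label for a single sample.
print(dataset_enriched[0]["prediction_string"], dataset_enriched[0]["label_string"])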
# ### Reduce embeddings for faster visualization
embeddings = np.stack(np.array(dataset_enriched['embedding']))
reducer = umap.UMAP()
reduced_embedding = reducer.fit_transform(embeddings)
dataset_enriched = dataset_enriched.add_column("embedding_reduced", list(reduced_embedding))
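
# With UMAP's default settings the reduced embedding has two components per sample, so the
# embedding_reduced column can be plotted directly as x/y coordinates.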
df = dataset_enriched.to_pandas()
# write the enriched dataset to disk (the data/ directory must exist)
df.to_parquet("data/dataset_audio_" + _SPLIT + ".parquet.gzip", compression='gzip')
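
# Optional check of the exported file (assumes the write above succeeded): load the parquet
# file back into pandas and inspect a few of the new columns.
df_check = pd.read_parquet("data/dataset_audio_" + _SPLIT + ".parquet.gzip")
print(df_check[["label_string", "prediction_string", "probability"]].head())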