padmalcom committed on
Commit 90aef4a · 1 Parent(s): 31d943c

Upload 2 files

Files changed (1)
  1. predict_online.py +87 -0
predict_online.py ADDED
from transformers import (
    Wav2Vec2FeatureExtractor,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2Processor
)
import os
import librosa
from datasets import Dataset
from datasets import disable_caching
import numpy as np
import torch.nn.functional as F
import torch
from model import Wav2Vec2ForCTCnCLS
from ctctrainer import CTCTrainer
from datacollator import DataCollatorCTCWithPadding

disable_caching()

# Label-to-index maps for the two classification heads; the spellings must
# match the labels used during training (including 'fourties').
cls_age_label_map = {'teens': 0, 'twenties': 1, 'thirties': 2, 'fourties': 3, 'fifties': 4, 'sixties': 5, 'seventies': 6, 'eighties': 7}
cls_age_label_class_weights = [0] * len(cls_age_label_map)

cls_gender_label_map = {'female': 0, 'male': 1}
cls_gender_label_class_weights = [0] * len(cls_gender_label_map)

model_path = "padmalcom/wav2vec2-asr-ultimate-german"

tokenizer = Wav2Vec2CTCTokenizer("./vocab.json", unk_token="<unk>", pad_token="<pad>", word_delimiter_token="|")

feature_extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0, do_normalize=True, return_attention_mask=False)

processor = Wav2Vec2Processor(feature_extractor, tokenizer)

# Multi-task model: a CTC head for transcription plus age and gender classification heads.
model = Wav2Vec2ForCTCnCLS.from_pretrained(
    model_path,
    vocab_size=len(processor.tokenizer),
    age_cls_len=len(cls_age_label_map),
    gender_cls_len=len(cls_gender_label_map),
    age_cls_weights=cls_age_label_class_weights,
    gender_cls_weights=cls_gender_label_class_weights,
    alpha=0.1,
)

data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True, audio_only=True)

pred_data = {'file': ['audio2.wav']}

target_sr = 16000

def prepare_dataset_step1(example):
    # Load the audio file and resample it to the 16 kHz the model expects.
    example["speech"], example["sampling_rate"] = librosa.load(example["file"], sr=target_sr)
    return example

def prepare_dataset_step2(batch):
    # Turn the raw waveforms into normalized input values for the model.
    batch["input_values"] = processor(batch["speech"], sampling_rate=batch["sampling_rate"][0]).input_values
    return batch

val_dataset = Dataset.from_dict(pred_data)
val_dataset = val_dataset.map(prepare_dataset_step1, load_from_cache_file=False)
val_dataset = val_dataset.map(prepare_dataset_step2, batch_size=2, batched=True, num_proc=1, load_from_cache_file=False)

# The trainer is only used as a convenient prediction loop; no training happens here.
trainer = CTCTrainer(
    model=model,
    data_collator=data_collator,
    eval_dataset=val_dataset,
    tokenizer=processor.feature_extractor,
)

# Predict on audio only; no transcription or classification labels are available.
data_collator.audio_only = True
predictions, labels, metrics = trainer.predict(val_dataset, metric_key_prefix="predict")
logits_ctc, logits_age_cls, logits_gender_cls = predictions

# process age classification
pred_ids_age_cls = np.argmax(logits_age_cls, axis=-1)
pred_age = pred_ids_age_cls[0]
age_class = [k for k, v in cls_age_label_map.items() if v == pred_age]
print("Predicted age: ", age_class[0])

# process gender classification
pred_ids_gender_cls = np.argmax(logits_gender_cls, axis=-1)
pred_gender = pred_ids_gender_cls[0]
gender_class = [k for k, v in cls_gender_label_map.items() if v == pred_gender]
print("Predicted gender: ", gender_class[0])

# process CTC transcription
pred_ids_ctc = np.argmax(logits_ctc, axis=-1)
pred_str = processor.batch_decode(pred_ids_ctc, output_word_offsets=True)
print("pred text: ", pred_str.text)
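Note: batch_decode is called with output_word_offsets=True, but the returned offsets are not used above. A minimal follow-up sketch, not part of the committed file, showing how they could be converted into rough word-level timestamps; it assumes the custom Wav2Vec2ForCTCnCLS config exposes inputs_to_logits_ratio like the stock wav2vec2 config does.

# Sketch (assumption: model.config.inputs_to_logits_ratio is available, as on
# the standard Wav2Vec2Config): convert CTC frame offsets to seconds.
time_per_frame = model.config.inputs_to_logits_ratio / target_sr
for word in pred_str.word_offsets[0]:
    start = word["start_offset"] * time_per_frame
    end = word["end_offset"] * time_per_frame
    print(f"{word['word']}: {start:.2f}s - {end:.2f}s")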