import time
from string import punctuation

import epitran
import numpy as np
import torch

from . import WordMatching as wm
from . import WordMetrics
from . import app_logger
from .models import AIModels, ModelInterfaces as mi, RuleBasedModels, models as mo


def getTrainer(language: str):

    device = torch.device('cpu')

    model, decoder = mo.getASRModel(language)
    model = model.to(device)
    model.eval()
    asr_model = AIModels.NeuralASR(model, decoder)

    if language == 'de':
        epitran_deu_latn = epitran.Epitran('deu-Latn')
        phonem_converter = RuleBasedModels.EpitranPhonemConverter(epitran_deu_latn)
    elif language == 'en':
        phonem_converter = RuleBasedModels.EngPhonemConverter()
    else:
        raise ValueError('Language not implemented')

    trainer = PronunciationTrainer(asr_model, phonem_converter)

    return trainer
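
# Usage sketch (illustrative only; assumes the requested ASR weights are
# reachable through models.getASRModel and that `audio` is a 1 x N float32
# torch tensor sampled at 16 kHz):
#
#   trainer = getTrainer('en')
#   result = trainer.processAudioForGivenText(audio, 'the quick brown fox')
#   print(result['pronunciation_accuracy'], result['pronunciation_categories'])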


class PronunciationTrainer:
    current_transcript: str
    current_ipa: str

    current_recorded_audio: torch.Tensor
    current_recorded_transcript: str
    current_recorded_word_locations: list
    current_recorded_intonations: torch.Tensor
    current_words_pronunciation_accuracy = []

    categories_thresholds = np.array([80, 60, 59])

    sampling_rate = 16000

    def __init__(self, asr_model: mi.IASRModel, word_to_ipa_converter: mi.ITextToPhonemModel) -> None:
        self.asr_model = asr_model
        self.ipa_converter = word_to_ipa_converter

    def getTranscriptAndWordsLocations(self, audio_length_in_samples: int):

        audio_transcript = self.asr_model.getTranscript()
        word_locations_in_samples = self.asr_model.getWordLocations()

        fade_duration_in_samples = 0.05*self.sampling_rate
        word_locations_in_samples = [(int(np.maximum(0, word['start_ts']-fade_duration_in_samples)), int(np.minimum(
            audio_length_in_samples-1, word['end_ts']+fade_duration_in_samples))) for word in word_locations_in_samples]

        return audio_transcript, word_locations_in_samples
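
    # Note: each word span reported by the ASR model is widened by roughly
    # 50 ms (0.05 * 16000 = 800 samples) on both sides and clipped to the
    # bounds of the recording before being returned.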

    def getWordsRelativeIntonation(self, Audio: torch.Tensor, word_locations: list):
        intonations = torch.zeros((len(word_locations), 1))
        intonation_fade_samples = 0.3*self.sampling_rate
        app_logger.info(intonations.shape)
        for word in range(len(word_locations)):
            intonation_start = int(np.maximum(
                0, word_locations[word][0]-intonation_fade_samples))
            intonation_end = int(np.minimum(
                Audio.shape[1]-1, word_locations[word][1]+intonation_fade_samples))
            intonations[word] = torch.sqrt(torch.mean(
                Audio[0][intonation_start:intonation_end]**2))
        intonations = intonations/torch.mean(intonations)
        return intonations
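
    # The per-word "intonation" above is the RMS energy of the audio around
    # each word (padded by 0.3 s on each side), normalised by the mean RMS over
    # all words, so values above 1.0 mark words spoken louder than average and
    # values below 1.0 mark quieter ones.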

    ##################### ASR Functions ###########################

    def processAudioForGivenText(self, recordedAudio: torch.Tensor = None, real_text=None):

        start = time.time()
        app_logger.info('starting getAudioTranscript...')
        recording_transcript, recording_ipa, word_locations = self.getAudioTranscript(recordedAudio)
        duration = time.time() - start
        app_logger.info(f'Time for NN to transcribe audio: {duration}.')

        start = time.time()
        real_and_transcribed_words, real_and_transcribed_words_ipa, mapped_words_indices = self.matchSampleAndRecordedWords(
            real_text, recording_transcript)
        duration = time.time() - start
        app_logger.info(f'Time for matching transcripts: {duration}.')

        start_time, end_time = self.getWordLocationsFromRecordInSeconds(
            word_locations, mapped_words_indices)

        pronunciation_accuracy, current_words_pronunciation_accuracy = self.getPronunciationAccuracy(
            real_and_transcribed_words)  # _ipa

        pronunciation_categories = self.getWordsPronunciationCategory(
            current_words_pronunciation_accuracy)

        result = {'recording_transcript': recording_transcript,
                  'real_and_transcribed_words': real_and_transcribed_words,
                  'recording_ipa': recording_ipa, 'start_time': start_time, 'end_time': end_time,
                  'real_and_transcribed_words_ipa': real_and_transcribed_words_ipa, 'pronunciation_accuracy': pronunciation_accuracy,
                  'pronunciation_categories': pronunciation_categories}

        return result
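
    # The dict returned above is the full evaluation payload: the raw ASR
    # transcript and its IPA, the (real word, transcribed word) pairs and their
    # IPA counterparts, space-separated word start/end times in seconds, the
    # overall accuracy percentage and one category index per word.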

    def getAudioTranscript(self, recordedAudio: torch.Tensor = None):
        current_recorded_audio = recordedAudio

        app_logger.info('starting preprocessAudio...')
        current_recorded_audio = self.preprocessAudio(current_recorded_audio)

        app_logger.info('starting processAudio...')
        self.asr_model.processAudio(current_recorded_audio)

        app_logger.info('starting getTranscriptAndWordsLocations...')
        current_recorded_transcript, current_recorded_word_locations = self.getTranscriptAndWordsLocations(
            current_recorded_audio.shape[1])

        app_logger.info('starting convertToPhonem...')
        current_recorded_ipa = self.ipa_converter.convertToPhonem(current_recorded_transcript)

        app_logger.info('ok, return audio transcript!')
        return current_recorded_transcript, current_recorded_ipa, current_recorded_word_locations

    def getWordLocationsFromRecordInSeconds(self, word_locations, mapped_words_indices) -> tuple[str, str]:
        start_time = []
        end_time = []
        for word_idx in range(len(mapped_words_indices)):
            start_time.append(float(word_locations[mapped_words_indices[word_idx]]
                                    [0])/self.sampling_rate)
            end_time.append(float(word_locations[mapped_words_indices[word_idx]]
                                  [1])/self.sampling_rate)
        return ' '.join([str(time) for time in start_time]), ' '.join([str(time) for time in end_time])
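
    # Sample-index word boundaries are converted to seconds and returned as two
    # space-separated strings (one entry per mapped word), e.g. something like
    # '0.05 0.61 1.2' for the start times.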

    ##################### END ASR Functions ###########################

    ##################### Evaluation Functions ###########################

    def matchSampleAndRecordedWords(self, real_text, recorded_transcript):
        words_estimated = recorded_transcript.split()

        if real_text is None:
            words_real = self.current_transcript[0].split()
        else:
            words_real = real_text.split()

        mapped_words, mapped_words_indices = wm.get_best_mapped_words(
            words_estimated, words_real)

        real_and_transcribed_words = []
        real_and_transcribed_words_ipa = []
        for word_idx in range(len(words_real)):
            if word_idx >= len(mapped_words)-1:
                mapped_words.append('-')
            real_and_transcribed_words.append(
                (words_real[word_idx], mapped_words[word_idx]))
            real_and_transcribed_words_ipa.append((self.ipa_converter.convertToPhonem(words_real[word_idx]),
                                                   self.ipa_converter.convertToPhonem(mapped_words[word_idx])))
        return real_and_transcribed_words, real_and_transcribed_words_ipa, mapped_words_indices
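
    # Illustrative pairing (assuming WordMatching aligns each expected word with
    # its closest transcribed word): real text 'hello world' and transcript
    # 'helo word' would yield [('hello', 'helo'), ('world', 'word')] plus the
    # corresponding IPA pairs; expected words with no match are paired with '-'.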

    def getPronunciationAccuracy(self, real_and_transcribed_words_ipa) -> tuple[float, list]:
        total_mismatches = 0.
        number_of_phonemes = 0.
        current_words_pronunciation_accuracy = []
        for pair in real_and_transcribed_words_ipa:

            real_without_punctuation = self.removePunctuation(pair[0]).lower()
            number_of_word_mismatches = WordMetrics.edit_distance_python(
                real_without_punctuation, self.removePunctuation(pair[1]).lower())
            total_mismatches += number_of_word_mismatches
            number_of_phonemes_in_word = len(real_without_punctuation)
            number_of_phonemes += number_of_phonemes_in_word

            current_words_pronunciation_accuracy.append(float(
                number_of_phonemes_in_word-number_of_word_mismatches)/number_of_phonemes_in_word*100)

        percentage_of_correct_pronunciations = (
            number_of_phonemes-total_mismatches)/number_of_phonemes*100

        return np.round(percentage_of_correct_pronunciations), current_words_pronunciation_accuracy
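
    # Worked example (illustrative): for the pair ('kæt', 'kat') the edit
    # distance is 1 and the reference has 3 symbols, so the word scores
    # (3 - 1) / 3 * 100 ≈ 66.7; the overall percentage pools mismatches and
    # symbol counts across all words before applying the same formula.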

    def removePunctuation(self, word: str) -> str:
        return ''.join([char for char in word if char not in punctuation])

    def getWordsPronunciationCategory(self, accuracies) -> list:
        categories = []
        for accuracy in accuracies:
            categories.append(
                self.getPronunciationCategoryFromAccuracy(accuracy))
        return categories

    def getPronunciationCategoryFromAccuracy(self, accuracy) -> int:
        return np.argmin(abs(self.categories_thresholds-accuracy))
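
    # Each accuracy is mapped to the index of the closest value in
    # categories_thresholds ([80, 60, 59]), so e.g. 75 -> category 0,
    # 65 -> category 1 and 50 -> category 2 (lower index = better).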

    def preprocessAudio(self, audio: torch.Tensor) -> torch.Tensor:
        audio = audio-torch.mean(audio)
        audio = audio/torch.max(torch.abs(audio))
        return audio
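
    # Preprocessing removes the DC offset and peak-normalises the waveform to
    # the range [-1, 1] before it is handed to the ASR model.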