Spaces:
Running
alessandro trinca tornidor
committed on
Commit 3fdcb38
1 parent: 79fb9f6
feat: frontend, add a table with all entries from the language df and pick them via their API

- aip_trainer/lambdas/data_de_en_with_categories.json +0 -0
- aip_trainer/lambdas/lambdaGetSample.py +63 -43
- aip_trainer/lambdas/lambdaSpeechToScore.py +1 -1
- aip_trainer/lambdas/lambdaTTS.py +1 -1
- static/css/style-new.css +47 -13
- static/javascript/callbacks.js +157 -104
- static/main.html +17 -15
- webApp.py +35 -7
aip_trainer/lambdas/data_de_en_with_categories.json
ADDED
The diff for this file is too large to render.
See raw diff
aip_trainer/lambdas/lambdaGetSample.py
CHANGED
@@ -4,8 +4,9 @@ import random
 from pathlib import Path
 
 import epitran
+import pandas as pd
 
-from aip_trainer import PROJECT_ROOT_FOLDER
+from aip_trainer import PROJECT_ROOT_FOLDER, app_logger
 from aip_trainer.models import RuleBasedModels
 
 
@@ -16,25 +17,33 @@ class TextDataset:
         self.language = language
 
     def __getitem__(self, idx):
-        elif self.language == 'en':
-            line = [self.table_dataframe['en_sentence'].iloc[idx]]
-        else:
-            line = [self.table_dataframe['sentence'].iloc[idx]]
-        return line
+        language_sentence = f"{self.language}_sentence" if self.language != '-' else 'sentence'
+        language_series = self.table_dataframe[language_sentence]
+        return [language_series.iloc[idx]]
 
     def __len__(self):
         return self.number_of_samples
 
+    def get_category_from_df_by_language(self, language: str, category_value: int):
+        selector = self.table_dataframe[f"{language}_category"] == category_value
+        df_by_category = self.table_dataframe[selector]
+        return df_by_category
+
+    def get_random_sample_from_df(self, language: str, category_value: int):
+        app_logger.info(f"language={language}, category_value={category_value}.")
+        choice = self.table_dataframe.sample(n=1)
+        if category_value != 0:
+            df_language_filtered_by_category_and_language = self.get_category_from_df_by_language(language, category_value)
+            choice = df_language_filtered_by_category_and_language.sample(n=1)
+        return [choice[f"{language}_sentence"].iloc[0]]
+
 
 sample_folder = Path(PROJECT_ROOT_FOLDER / "aip_trainer" / "lambdas")
 lambda_database = {}
 lambda_ipa_converter = {}
 
-with open(sample_folder / '
-df =
+with open(sample_folder / 'data_de_en_with_categories.json', 'r') as src:
+    df = pd.read_json(src)
 
 lambda_database['de'] = TextDataset(df, 'de')
 lambda_database['en'] = TextDataset(df, 'en')
@@ -45,40 +54,31 @@ lambda_ipa_converter['en'] = RuleBasedModels.EngPhonemConverter()
 
 
 def lambda_handler(event, context):
     body = json.loads(event['body'])
 
+    try:
+        category = int(body['category'])
+    except KeyError:
+        category = 0
 
     language = body['language']
-        sample_in_category = (sentence_category == category) or category == 0
-        translated_trascript = ""
-        current_ipa = lambda_ipa_converter[language].convertToPhonem(
-            current_transcript[0])
-        result = {'real_transcript': current_transcript,
-                  'ipa_transcript': current_ipa,
-                  'transcript_translation': translated_trascript}
+    try:
+        sample_idx = int(body['idx'])
+    except KeyError:
+        sample_idx = None
+
+    app_logger.info(f"category={category}, language={language}, sample_idx={sample_idx}.")
+    lambda_df_lang = lambda_database[language]
+    current_transcript = lambda_df_lang[sample_idx] if sample_idx is not None else lambda_df_lang.get_random_sample_from_df(language, category)
+    # sentence_category = getSentenceCategory(current_transcript[0])
+    current_ipa = lambda_ipa_converter[language].convertToPhonem(current_transcript[0])
+
+    app_logger.info(f"real_transcript={current_transcript}, ipa_transcript={current_ipa}.")
+    result = {
+        'real_transcript': current_transcript,
+        'ipa_transcript': current_ipa,
+        'transcript_translation': ""
+    }
 
     return json.dumps(result)
 
@@ -86,6 +86,26 @@ def lambda_handler(event, context):
 def getSentenceCategory(sentence) -> int:
     number_of_words = len(sentence.split())
     categories_word_limits = [0, 8, 20, 100000]
-    for category in range(len(categories_word_limits)-1):
+    for category in range(len(categories_word_limits) - 1):
         if categories_word_limits[category] < number_of_words <= categories_word_limits[category + 1]:
-            return category+1
+            return category + 1
+
+
+if __name__ == "__main__":
+    import pandas as pd
+    with open(sample_folder / 'data_de_en_2.pickle', 'rb') as handle:
+        df = pickle.load(handle)
+    pass
+    df["de_category"] = df["de_sentence"].apply(getSentenceCategory)
+    print("de_category added")
+    df["en_category"] = df["en_sentence"].apply(getSentenceCategory)
+    print("en_category added")
+    df_json = df.to_json()
+    with open(sample_folder / 'data_de_en_with_categories.json', 'w') as dst:
+        dst.write(df_json)
+    print("data_de_en_with_categories.json written")
+    with open(sample_folder / 'data_de_en_with_categories.json', 'r') as src:
+        jj = json.load(src)
+    print("jj:", jj)
+    df2 = pd.read_json(json.dumps(jj))
+    print(df2)
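A minimal local sketch of how the reworked handler can be exercised, mirroring the way webApp.py invokes the lambdas; it assumes the JSON dataset and the epitran/phonem converters load at import time and that index 42 exists in the dataframe:

import json

from aip_trainer.lambdas import lambdaGetSample

# Random sample from category 1 ("easy") in German; 'category' falls back to 0 when missing.
event_random = {"body": json.dumps({"language": "de", "category": "1"})}
print(lambdaGetSample.lambda_handler(event_random, []))

# Specific sample picked by dataframe index, as the new frontend table rows do.
event_by_idx = {"body": json.dumps({"language": "en", "idx": "42"})}
print(lambdaGetSample.lambda_handler(event_by_idx, []))
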
aip_trainer/lambdas/lambdaSpeechToScore.py
CHANGED
@@ -35,7 +35,7 @@ def lambda_handler(event, context):
             'headers': {
                 'Access-Control-Allow-Headers': '*',
                 'Access-Control-Allow-Credentials': "true",
-                'Access-Control-Allow-Origin': '
+                'Access-Control-Allow-Origin': 'http://127.0.0.1:3000/',
                 'Access-Control-Allow-Methods': 'OPTIONS,POST,GET'
             },
             'body': ''
aip_trainer/lambdas/lambdaTTS.py
CHANGED
@@ -37,7 +37,7 @@ def lambda_handler(event, context):
         'statusCode': 200,
         'headers': {
             'Access-Control-Allow-Headers': '*',
-            'Access-Control-Allow-Origin': '
+            'Access-Control-Allow-Origin': 'http://127.0.0.1:3000/',
             'Access-Control-Allow-Methods': 'OPTIONS,POST,GET'
         },
         'body': json.dumps(
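Both lambdas now pin Access-Control-Allow-Origin to the local dev origin http://127.0.0.1:3000/. A hedged sketch of one way to keep that value in a single place; the helper name and the ALLOWED_ORIGIN environment variable are illustrative assumptions, not part of this commit:

import os

def cors_headers() -> dict:
    # Hypothetical shared helper: read the allowed origin once instead of
    # hardcoding it in lambdaTTS.py and lambdaSpeechToScore.py separately.
    allowed_origin = os.getenv("ALLOWED_ORIGIN", "http://127.0.0.1:3000/")
    return {
        'Access-Control-Allow-Headers': '*',
        'Access-Control-Allow-Credentials': "true",
        'Access-Control-Allow-Origin': allowed_origin,
        'Access-Control-Allow-Methods': 'OPTIONS,POST,GET',
    }
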
static/css/style-new.css
CHANGED
@@ -127,9 +127,9 @@ a.disabled {
     display: block;
     position: absolute;
     left: 2%;
-    top:
+    top: 15%;
     transform: translate(-0%, -0%);
-    height:
+    height: 45%;
     width: 96%;
     max-width: 96%;
     background: #ffff;
@@ -138,13 +138,30 @@ a.disabled {
     box-shadow: 0 0 20px 8px #d0d0d0;
 }
 
+.container2 {
+    display: block;
+    position: absolute;
+    left: 2%;
+    top: 63%;
+    transform: translate(-0%, -0%);
+    height: 30%;
+    width: 96%;
+    max-width: 96%;
+    background: #ffff;
+    overflow: hidden;
+    border-radius: 20px;
+    box-shadow: 0 0 20px 8px #d0d0d0;
+    overflow: scroll;
+    max-height: 30em;
+}
+
 .container-small {
     position: fixed;
-    left:
-    top:
+    left: 73%;
+    top: 95%;
     transform: translate(-0%, -0%);
-    height:
-    width:
+    height: 4%;
+    width: 15%;
     background: #ffff;
     overflow: hidden;
     border-radius: 20px;
@@ -223,8 +240,8 @@ a.disabled {
 
 .mic-button-div {
     position: fixed;
-    left:
-    top:
+    left: 60%;
+    top: 5%
 }
 
 /*############### Drop-down ############# */
@@ -345,7 +362,7 @@ a.disabled {
 .credits-icon-div {
     position: fixed;
     left: 90.5%;
-    top:
+    top: 96%;
     font-size: x-small;
 }
 
@@ -384,9 +401,9 @@ a.disabled {
     display: block;
     position: absolute;
     left: 2%;
-    top:
+    top: 15%;
     transform: translate(-0%, -0%);
-    height:
+    height: 85%;
     width: 96%;
     max-width: 96%;
     background: #ffff;
@@ -395,6 +412,23 @@ a.disabled {
     box-shadow: 0 0 20px 8px #d0d0d0;
 }
 
+.container2 {
+    display: block;
+    position: absolute;
+    left: 2%;
+    top: 63%;
+    transform: translate(-0%, -0%);
+    height: 30%;
+    width: 96%;
+    max-width: 96%;
+    background: #ffff;
+    overflow: hidden;
+    border-radius: 20px;
+    box-shadow: 0 0 20px 8px #d0d0d0;
+    overflow: scroll;
+    max-height: 30em;
+}
+
 .icon-text {
     font-size: 0.8em !important;
     text-align: center;
@@ -427,7 +461,7 @@ a.disabled {
 .mic-button-div {
     position: fixed;
     left: 40%;
-    top:
+    top: 5%
 }
 
 .link-icon-div {
@@ -468,4 +502,4 @@ a.disabled {
     font-size: 0.8em;
 }
 
-}
+}
static/javascript/callbacks.js
CHANGED
@@ -1,5 +1,3 @@
-
-
 // Audio context initialization
 let mediaRecorder, audioChunks, audioBlob, stream, audioRecorded;
 const ctx = new AudioContext();
@@ -25,12 +23,14 @@ let currentSoundRecorded = false;
 let currentText, currentIpa, real_transcripts_ipa, matched_transcripts_ipa;
 let wordCategories;
 let startTime, endTime;
+let allSamples = {};
+let currentSamplesObj = {};
+var timeout = null
 
 // API related variables
 let AILanguage = "de"; // Standard is German
 
-
-let STScoreAPIKey = 'rll5QsTiv83nti99BW6uCmvs9BDVxSB39SVFceYb'; // Public Key. If, for some reason, you would like a private one, send-me a message and we can discuss some possibilities
+let STScoreAPIKey = '';
 let apiMainPathSample = '';// 'http://127.0.0.1:3001';// 'https://a3hj0l2j2m.execute-api.eu-central-1.amazonaws.com/Prod';
 let apiMainPathSTS = '';// 'https://wrg7ayuv7i.execute-api.eu-central-1.amazonaws.com/Prod';
@@ -57,8 +57,6 @@ const unblockUI = () => {
 
     if (currentSoundRecorded)
         document.getElementById("playRecordedAudio").classList.remove('disabled');
-
-
 };
 
 const blockUI = () => {
@@ -79,24 +77,24 @@ const UIError = () => {
     document.getElementById("buttonNext").onclick = () => getNextSample(); //If error, user can only try to get a new sample
     document.getElementById("buttonNext").style["background-color"] = '#58636d';
 
-    document.getElementById("recorded_ipa_script").
-    document.getElementById("single_word_ipa_pair").
-    document.getElementById("ipa_script").
+    document.getElementById("recorded_ipa_script").innerText = "";
+    document.getElementById("single_word_ipa_pair").innerText = "Error";
+    document.getElementById("ipa_script").innerText = "Error"
 
-    document.getElementById("main_title").
-    document.getElementById("original_script").
+    document.getElementById("main_title").innerText = 'Server Error';
+    document.getElementById("original_script").innerText = 'Server error. Either the daily quota of the server is over or there was some internal error. You can try to generate a new sample in a few seconds. If the error persist, try comming back tomorrow or download the local version from Github :)';
 };
 
 const UINotSupported = () => {
     unblockUI();
 
-    document.getElementById("main_title").
+    document.getElementById("main_title").innerText = "Browser unsupported";
 
 }
 
 const UIRecordingError = () => {
     unblockUI();
-    document.getElementById("main_title").
+    document.getElementById("main_title").innerText = "Recording error, please try again or restart page.";
     startMediaDevice();
 }
@@ -112,19 +110,19 @@ function updateScore(currentPronunciationScore) {
 }
 
 const cacheSoundFiles = async () => {
-    await fetch(soundsPath + '/ASR_good.wav').then(
+    await fetch(soundsPath + '/ASR_good.wav').then(dataSound1 => dataSound1.arrayBuffer()).
         then(arrayBuffer => ctx.decodeAudioData(arrayBuffer)).
         then(decodeAudioData => {
             soundFileGood = decodeAudioData;
         });
 
-    await fetch(soundsPath + '/ASR_okay.wav').then(
+    await fetch(soundsPath + '/ASR_okay.wav').then(dataSound2 => dataSound2.arrayBuffer()).
         then(arrayBuffer => ctx.decodeAudioData(arrayBuffer)).
         then(decodeAudioData => {
            soundFileOkay = decodeAudioData;
        });
 
-    await fetch(soundsPath + '/ASR_bad.wav').then(
+    await fetch(soundsPath + '/ASR_bad.wav').then(dataSound3 => dataSound3.arrayBuffer()).
        then(arrayBuffer => ctx.decodeAudioData(arrayBuffer)).
        then(decodeAudioData => {
            soundFileBad = decodeAudioData;
@@ -132,28 +130,7 @@ const cacheSoundFiles = async () => {
 }
 
 const getNextSample = async () => {
-    blockUI();
-
-    if (!serverIsInitialized)
-        await initializeServer();
-
-    if (!serverWorking) {
-        UIError();
-        return;
-    }
-
-    if (soundFileBad == null)
-        cacheSoundFiles();
-
-    updateScore(parseFloat(document.getElementById("pronunciation_accuracy").innerHTML));
-
-    document.getElementById("main_title").innerHTML = "Processing new sample...";
+    await prepareUiForNextSample()
 
     if (document.getElementById('lengthCat1').checked) {
        sample_difficult = 0;
@@ -178,44 +155,60 @@ const getNextSample = async () => {
        body: JSON.stringify({
            "category": sample_difficult.toString(), "language": AILanguage
        }),
-        doc.innerHTML = currentText;
-
-        document.getElementById("pronunciation_accuracy").innerHTML = "";
-        document.getElementById("single_word_ipa_pair").innerHTML = "Reference | Spoken"
-        document.getElementById("section_accuracy").innerHTML = "| Score: " + currentScore.toString() + " - (" + currentSample.toString() + ")";
-        currentSample += 1;
-
-        document.getElementById("playRecordedAudio").classList.add('disabled');
+    }).then(res => res.json()).then(dataFromNextSample => {
+        // console.debug(`getNextSample:: dataFromNextSample: `, typeof dataFromNextSample, "=>", dataFromNextSample, "#");
+        populateSampleById(dataFromNextSample)
+    })
+    }
+    catch {
+        UIError();
+    }
+
+};
+
+const prepareUiForNextSample = async () => {
+    blockUI();
+
+    if (!serverIsInitialized)
+        await initializeServer();
+
+    if (!serverWorking) {
+        UIError();
+        return;
+    }
+
+    if (soundFileBad == null)
+        cacheSoundFiles();
+
+    updateScore(parseFloat(document.getElementById("pronunciation_accuracy").innerHTML));
+
+    document.getElementById("main_title").innerText = "Processing new sample...";
+}
+
+const populateSampleById = (dataById) => {
+    // console.debug(`populateSampleById:: dataById: `, typeof dataById, "=>", dataById, "#");
+    let doc = document.getElementById("original_script");
+    currentText = dataById.real_transcript;
+    doc.innerText = currentText;
+    currentIpa = dataById.ipa_transcript
+
+    let doc_ipa = document.getElementById("ipa_script");
+    doc_ipa.innerText = `/ ${String(currentIpa)} /`
+
+    document.getElementById("recorded_ipa_script").innerText = ""
+    document.getElementById("pronunciation_accuracy").innerText = "";
+    document.getElementById("single_word_ipa_pair").innerText = "Reference | Spoken"
+    // document.getElementById("section_accuracy").innerText = "| Score: " + currentScore.toString() + " - (" + currentSample.toString() + ")";
+    document.getElementById("section_accuracy").innerText = `| Score: ${currentScore.toString()} - sample n: ${currentSample.toString()}`;
+    currentSample += 1;
+
+    document.getElementById("main_title").innerText = page_title;
+    document.getElementById("translated_script").innerText = dataById.transcript_translation;
+
+    currentSoundRecorded = false;
+    unblockUI();
+    document.getElementById("playRecordedAudio").classList.add('disabled');
+}
@@ -229,15 +222,15 @@ const updateRecordingState = async () => {
 }
 
 const generateWordModal = (word_idx) => {
+    innerText0 = wrapWordForPlayingLink(real_transcripts_ipa[word_idx], word_idx, false, "black")
+    innerText1 = wrapWordForPlayingLink(matched_transcripts_ipa[word_idx], word_idx, true, accuracy_colors[parseInt(wordCategories[word_idx])])
+    document.getElementById("single_word_ipa_pair").innerText = `${innerText0} | ${innerText1}`
 }
 
 const recordSample = async () => {
 
-    document.getElementById("main_title").
-    document.getElementById("recordIcon").
+    document.getElementById("main_title").innerText = "Recording... click again when done speaking";
+    document.getElementById("recordIcon").innerText = 'pause_presentation';
     blockUI();
     document.getElementById("recordAudio").classList.remove('disabled');
     audioChunks = [];
@@ -251,17 +244,16 @@ const changeLanguage = (language, generateNewSample = false) => {
     AILanguage = language;
     languageFound = false;
     let languageIdentifier, languageName;
+    document.getElementById("field-filter-samples").value = "";
     switch (language) {
        case 'de':
-
-            document.getElementById("languageBox").innerHTML = "German";
+            document.getElementById("languageBox").innerText = "German";
            languageIdentifier = 'de';
            languageName = 'Anna';
            break;
 
        case 'en':
-
-            document.getElementById("languageBox").innerHTML = "English";
+            document.getElementById("languageBox").innerText = "English";
            languageIdentifier = 'en';
            languageName = 'Daniel';
            break;
@@ -285,6 +277,7 @@ const changeLanguage = (language, generateNewSample = false) => {
        }
    }
 }
+    getTableFromSamples(allSamples, `${AILanguage}_sentence`);
    if (generateNewSample)
        getNextSample();
 }
@@ -303,10 +296,10 @@ const startMediaDevice = () => {
        stream = _stream
        mediaRecorder = new MediaRecorder(stream);
 
-        let
+        let currentSamplesN = 0
        mediaRecorder.ondataavailable = event => {
 
-
+            currentSamplesN += event.data.length
            audioChunks.push(event.data);
        };
@@ -334,29 +327,28 @@ const startMediaDevice = () => {
            await fetch(apiMainPathSTS + '/GetAccuracyFromRecordedAudio', {
                method: "post",
                body: JSON.stringify({ "title": currentText[0], "base64Audio": audioBase64, "language": AILanguage }),
-                headers: { "X-Api-Key": STScoreAPIKey }
 
            }).then(res => res.json()).
-                then(
+                then(mediaData => {
 
                    if (playAnswerSounds)
-                        playSoundForAnswerAccuracy(parseFloat(
+                        playSoundForAnswerAccuracy(parseFloat(mediaData.pronunciation_accuracy))
 
-                    document.getElementById("recorded_ipa_script").
+                    document.getElementById("recorded_ipa_script").innerText = `/ ${mediaData.ipa_transcript} /`
                    document.getElementById("recordAudio").classList.add('disabled');
-                    document.getElementById("main_title").
-                    document.getElementById("pronunciation_accuracy").
+                    document.getElementById("main_title").innerText = page_title;
+                    document.getElementById("pronunciation_accuracy").innerText = `${mediaData.pronunciation_accuracy}%`;
 
-                    lettersOfWordAreCorrect =
+                    lettersOfWordAreCorrect = mediaData.is_letter_correct_all_words.split(" ")
 
-                    startTime =
-                    endTime =
+                    startTime = mediaData.start_time;
+                    endTime = mediaData.end_time;
 
-                    real_transcripts_ipa =
-                    matched_transcripts_ipa =
-                    wordCategories =
+                    real_transcripts_ipa = mediaData.real_transcripts_ipa.split(" ")
+                    matched_transcripts_ipa = mediaData.matched_transcripts_ipa.split(" ")
+                    wordCategories = mediaData.pair_accuracy_category.split(" ")
                    let currentTextWords = currentText[0].split(" ")
 
                    coloredWords = "";
@@ -413,9 +405,9 @@ const playSoundForAnswerAccuracy = async (accuracy) => {
 
 const playAudio = async () => {
 
-    document.getElementById("main_title").
+    document.getElementById("main_title").innerText = "Generating sound...";
    playWithMozillaApi(currentText[0]);
-    document.getElementById("main_title").
+    document.getElementById("main_title").innerText = "Current Sound was played";
 
 };
@@ -436,7 +428,7 @@ const playRecording = async (start = null, end = null) => {
    audioRecorded.addEventListener("ended", function () {
        audioRecorded.currentTime = 0;
        unblockUI();
-        document.getElementById("main_title").
+        document.getElementById("main_title").innerText = "Recorded Sound was played";
    });
    await audioRecorded.play();
@@ -450,7 +442,7 @@ const playRecording = async (start = null, end = null) => {
        unblockUI();
        audioRecorded.pause();
        audioRecorded.currentTime = 0;
-        document.getElementById("main_title").
+        document.getElementById("main_title").innerText = "Recorded Sound was played";
    }, endTimeInMs);
 
 }
@@ -473,15 +465,15 @@ const playNativeAndRecordedWord = async (word_idx) => {
 const stopRecording = () => {
    isRecording = false
    mediaRecorder.stop()
-    document.getElementById("main_title").
+    document.getElementById("main_title").innerText = "Processing audio...";
 }
 
 
 const playCurrentWord = async (word_idx) => {
 
-    document.getElementById("main_title").
+    document.getElementById("main_title").innerText = "Generating word...";
    playWithMozillaApi(currentText[0].split(' ')[word_idx]);
-    document.getElementById("main_title").
+    document.getElementById("main_title").innerText = "Word was played";
 }
 
 // TODO: Check if fallback is correct
@@ -534,19 +526,19 @@ const wrapWordForPlayingLink = (word, word_idx, isFromRecording, word_accuracy_c
 }
 
 const wrapWordForIndividualPlayback = (word, word_idx) => {
    return '<a onmouseover="generateWordModal(' + word_idx.toString() + ')" style = " white-space:nowrap; " href="javascript:playNativeAndRecordedWord(' + word_idx.toString() + ')" >' + word + '</a> '
 }
 
 // ########## Function to initialize server ###############
 // This is to try to avoid aws lambda cold start
 try {
+    fetch(apiMainPathSTS + '/getAllSamples').then(res => res.json()).then(dataAllSamples => {
+        populateAllSamples(dataAllSamples);
+        getTableFromSamples(dataAllSamples, `${AILanguage}_sentence`);
+    });
    fetch(apiMainPathSTS + '/GetAccuracyFromRecordedAudio', {
        method: "post",
        body: JSON.stringify({ "title": '', "base64Audio": '', "language": AILanguage }),
-        headers: { "X-Api-Key": STScoreAPIKey }
 
    });
 }
@@ -555,7 +547,7 @@ catch { }
 const initializeServer = async () => {
 
    valid_response = false;
-    document.getElementById("main_title").
+    document.getElementById("main_title").innerText = 'Initializing server, this may take up to 2 minutes...';
    let number_of_tries = 0;
    let maximum_number_of_tries = 4;
@@ -569,16 +561,77 @@ const initializeServer = async () => {
            await fetch(apiMainPathSTS + '/GetAccuracyFromRecordedAudio', {
                method: "post",
                body: JSON.stringify({ "title": '', "base64Audio": '', "language": AILanguage }),
-                headers: { "X-Api-Key": STScoreAPIKey }
 
-            }).then(
-                valid_response = true);
+            }).then(valid_response = true);
            serverIsInitialized = true;
        }
-        catch
-        {
+        catch {
            number_of_tries += 1;
        }
    }
 }
 
+const populateAllSamples = async (allData) => {
+    allSamples = allData;
+}
+
+const getTableFromSamples = async (obj, lang, isFiltered = false) => {
+    let table = document.getElementById("field-samples");
+    table.innerHTML = "";
+    let objLang = obj[lang];
+    // let lenAllSamples = Object.entries(allSamples[lang]).length;
+    // console.debug(`getTableFromSamples, isFiltered: ${isFiltered}, typeof objLang: ${typeof objLang}, lenAllSamples:${lenAllSamples}, objLang.length: `, Object.entries(objLang).length, "#", objLang);
+    for (let key2 in objLang) {
+        var tr = createTableRow(`${key2}: ${objLang[key2]} || ${lang}.`, key2, isFiltered);
+        table.appendChild(tr);
+    }
+    table.appendChild(tr);
+}
+
+const createTableRow = (contentRow, sampleIdx, isFiltered = false) => {
+    var tr = document.createElement('tr');
+    tr.append(`${contentRow}`);
+    tr.onclick = async function () {
+        await prepareUiForNextSample()
+        // console.debug(`createTableRow:: ${isFiltered}, sampleIdx: `, sampleIdx);
+        await fetch(apiMainPathSample + '/getSample', {
+            method: "post",
+            body: JSON.stringify({
+                "language": AILanguage, "idx": sampleIdx
+            }),
+        }).then(res => {
+            let res2json = res.json()
+            // console.debug(`createTableRow:: ${isFiltered}, res2json: `, typeof res2json, "=>", res2json, "#");
+            return res2json
+        }).then(dataOnRowCreation => {
+            // console.debug(`createTableRow:: ${isFiltered}, dataOnRowCreation: `, typeof dataOnRowCreation, "=>", dataOnRowCreation, "#");
+            populateSampleById(dataOnRowCreation)
+            tr.style["background-color"] = "#f0f0f0";
+        })
+    };
+    return tr;
+}
+
+const filterAllSamples = async (obj, filter, lang) => {
+    if (filter == "") {
+        currentSamplesObj = {...obj}
+    };
+    objByLAng = obj[lang];
+    const filtered = Object.entries(objByLAng).filter(([key, value]) => value.toLowerCase().includes(filter));
+    currentSamplesObj = {
+        [lang]: Object.entries(filtered).map(([key, value]) => value[1])
+    };
+}
+
+// todo: fix the request from the rows filtered not working
+$(document).ready(function(){
+    $("#field-filter-samples").on("keyup", function(e) {
+        e.preventDefault();
+        var keycode = (e.keyCode ? e.keyCode : e.which);
+        if (keycode === 13 || e.key === 'Enter') {
+            var valueFilter = $(this).val().toLowerCase();
+            filterAllSamples(allSamples, valueFilter, `${AILanguage}_sentence`)
+            getTableFromSamples(currentSamplesObj, `${AILanguage}_sentence`, true);
+        }
+    });
+});
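The new table flow added here boils down to: fetch every sample once (/getAllSamples), render one row per entry, and on click post the row index back to the sample endpoint so populateSampleById can refresh the UI. A rough Python equivalent of that round trip, assuming the local Flask server from webApp.py is running and that the pre-existing /getSample route (not shown in this diff) forwards to lambdaGetSample.lambda_handler:

import requests

BASE = "http://127.0.0.1:3000"  # local dev server started by webApp.py

# 1) The sample table is built from every entry of the active language.
all_samples = requests.get(f"{BASE}/getAllSamples").json()
de_sentences = all_samples["de_sentence"]      # mapping of row index -> sentence
some_idx = next(iter(de_sentences))            # any row index from the table

# 2) Clicking a row posts that index; the response feeds populateSampleById.
sample = requests.post(f"{BASE}/getSample",
                       json={"language": "de", "idx": some_idx}).json()
print(sample["real_transcript"], sample["ipa_transcript"])
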
static/main.html
CHANGED
@@ -68,6 +68,11 @@
 
     </div>
 
+    <div id="btn-record" class="expanded mic-button-div">
+        <a id="recordAudio" href="javascript:updateRecordingState()" class="round-button-mic disabled"
+            style="color:white; text-align:center; "><i id="recordIcon" class="material-icons icon-text-mic">mic</i>
+        </a>
+    </div>
 
     <div class="container">
 
@@ -116,26 +121,16 @@
             <span></span>
         </button>
     </div>
-    </div>
-
 
-    <div class="container-small flex expand"
-        style="align-items: center; text-align: center; vertical-align:middle; ">
-        <p id="single_word_ipa_pair" class="expand ipa-text-small"
-            style="text-align: center; vertical-align: middle;">Reference | Spoken
-        </p>
     </div>
 
+    <div class="container2">
+        <div id="div-field-filter-samples" style="position: absolute; width: 97%; margin: 1em;">
+            <input id="field-filter-samples" type="search" class="form-control" placeholder="Write and press enter to filter">
+            <div id="field-samples">{}</div>
+        </div>
     </div>
 
     <div id="radio-difficulty" class="radio" style="position: fixed; top: 95%; left: 2%;">
         <input label="Random" type="radio" id="lengthCat1" name='length' onclick="javascript:getNextSample()">
         <input label="Easy" type="radio" id="lengthCat2" name='length' checked onclick="javascript:getNextSample()">
@@ -143,6 +138,13 @@
         <input label="Hard" type="radio" id="lengthCat4" name='length' onclick="javascript:getNextSample()">
     </div>
 
+    <div class="container-small flex expand"
+        style="align-items: center; text-align: center; vertical-align:middle; ">
+        <p id="single_word_ipa_pair" class="expand ipa-text-small"
+            style="text-align: center; vertical-align: middle;">Reference | Spoken
+        </p>
+    </div>
+
     </div>
 
 
webApp.py
CHANGED
@@ -1,8 +1,8 @@
 import json
-import os
 import webbrowser
 
-from 
+from aip_trainer import PROJECT_ROOT_FOLDER, app_logger
+from flask import Flask, render_template, request, Response
 from flask_cors import CORS
 
 from aip_trainer.lambdas import lambdaGetSample
@@ -22,6 +22,34 @@ def main():
     return render_template('main.html')
 
 
+@app.route(rootPath+'/getAllSamples')
+def getDataDeEnAll():
+    import pickle
+    from pathlib import Path
+    sample_folder = Path(PROJECT_ROOT_FOLDER / "aip_trainer" / "lambdas")
+    with open(sample_folder / 'data_de_en_2.pickle', 'rb') as handle:
+        df = pickle.load(handle)
+    j = df.to_json()
+    return Response(j, mimetype='application/json')
+
+
+@app.route(rootPath+'/getSampleSearch', methods=['POST'])
+def getDataDeEnSearch():
+    import pickle
+    from pathlib import Path
+    sample_folder = Path(PROJECT_ROOT_FOLDER / "aip_trainer" / "lambdas")
+    with open(sample_folder / 'data_de_en_2.pickle', 'rb') as handle:
+        event = request.get_json(force=True)
+        df = pickle.load(handle)
+        lang = event.get('language')
+        filter_key = event.get('search')
+        df_by_language = df[f"{lang}_sentence"]
+        filter_obj = df_by_language.str.contains(filter_key)
+        filtered = df_by_language[filter_obj]
+        j = filtered.to_json()
+    return Response(j, mimetype='application/json')
+
+
 @app.route(rootPath+'/getAudioFromText', methods=['POST'])
 def getAudioFromText():
     event = {'body': json.dumps(request.get_json(force=True))}
@@ -39,15 +67,15 @@ def GetAccuracyFromRecordedAudio():
     try:
         event = {'body': json.dumps(request.get_json(force=True))}
         lambda_correct_output = lambdaSpeechToScore.lambda_handler(event, [])
+        return lambda_correct_output
     except Exception as e:
         import traceback
+        app_logger.error(e)
+        app_logger.error(traceback.format_exc())
+        raise e
 
 
 if __name__ == "__main__":
     language = 'de'
-    print(os.system('pwd'))
     webbrowser.open_new('http://127.0.0.1:3000/')
-    app.run(host="0.0.0.0", port=3000)
+    app.run(host="0.0.0.0", port=3000)  # , debug=True)
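A small sketch of calling the new /getSampleSearch route added above; the returned mapping is row index to sentence for the requested language, the example match is illustrative, and note that pandas.Series.str.contains is case-sensitive by default:

import requests

BASE = "http://127.0.0.1:3000"  # local dev server from webApp.py

resp = requests.post(f"{BASE}/getSampleSearch",
                     json={"language": "de", "search": "Haus"})
matches = resp.json()  # e.g. {"17": "Das Haus ist alt.", ...} (illustrative values)
for idx, sentence in matches.items():
    print(idx, sentence)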