File size: 2,393 Bytes
4098e9b |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 |
const { createApp, ref, onMounted, computed, watch } = Vue;
import { HfInference } from "https://cdn.skypack.dev/@huggingface/inference@latest";

// Small demo app: pick an audio file + an ASR model, run speech recognition
// via the Hugging Face Inference API, and show the transcribed text.
const app = createApp({
  setup() {
    // Hugging Face API token; initialized from localStorage so it survives reloads.
    const token = ref(localStorage.getItem("token") || "");
    // ASR models offered in the UI.
    const models = ref([
      "openai/whisper-tiny",
      "facebook/wav2vec2-large-960h-lv60-self",
      "openai/whisper-large-v2",
    ]);
    // Sample audio files (fetched by relative URL, so they must sit next to this page).
    const audioFiles = ref([
      "clear-audio-1.wav",
      "clear-audio-2.wav",
      "unclear-audio-1.wav",
      "unclear-audio-2.wav",
    ]);
    const selectedAudio = ref("clear-audio-1.wav");
    const selectedModel = ref("");
    const loading = ref(false);
    const didErrorOccur = ref(false);
    const recognizedText = ref("");

    const statusMessage = computed(() => (loading.value ? "Loading..." : "Ready"));

    // Clear any previous run's state before starting (or re-configuring) a run.
    const reset = () => {
      didErrorOccur.value = false;
      loading.value = false;
      recognizedText.value = "";
    };

    // Fetch the selected audio file and run it through the selected ASR model.
    const run = async () => {
      reset();
      loading.value = true;
      try {
        const hf = new HfInference(token.value);
        const audioData = await (await fetch(selectedAudio.value)).arrayBuffer();
        const result = await hf.automaticSpeechRecognition({
          data: audioData,
          model: selectedModel.value,
        });
        recognizedText.value = result.text;
      } catch (e) {
        console.error(e);
        didErrorOccur.value = true;
      } finally {
        // FIX: was duplicated in both try and catch; finally guarantees it runs once.
        loading.value = false;
      }
    };

    // Changing either input invalidates the previous transcription.
    watch([selectedAudio, selectedModel], reset);

    // FIX: persist the token so the localStorage read above finds it on the
    // next page load (previously the token was read but never written back).
    watch(token, (value) => {
      localStorage.setItem("token", value);
    });

    onMounted(() => {
      // FIX: removed the redundant localStorage re-read — `token` is already
      // initialized from localStorage at declaration. Default to the first model.
      selectedModel.value = models.value[0];
    });

    return {
      token,
      run,
      audioFiles,
      selectedAudio,
      models,
      selectedModel,
      loading,
      // FIX: was set on failure but never exposed, so the template could
      // never react to errors.
      didErrorOccur,
      statusMessage,
      recognizedText,
    };
  },
});

app.mount("#app");
|