unijoh committed
Commit
807daef
1 Parent(s): b20a6cf

Create lid.py

Files changed (1)
  1. lid.py +41 -0
lid.py ADDED
@@ -0,0 +1,41 @@
+ from transformers import Wav2Vec2ForSequenceClassification, AutoFeatureExtractor
+ import torch
+ import librosa
+
+ model_id = "facebook/mms-lid-1024"
+
+ processor = AutoFeatureExtractor.from_pretrained(model_id)
+ model = Wav2Vec2ForSequenceClassification.from_pretrained(model_id)
+
+ LID_SAMPLING_RATE = 16_000
+ LID_THRESHOLD = 0.33
+
+ LID_LANGUAGES = {}
+ with open(f"data/lid/all_langs.tsv") as f:
+     for line in f:
+         iso, name = line.split(" ", 1)
+         LID_LANGUAGES[iso] = name.strip()
+
+ def identify_language(audio_source=None, microphone=None, file_upload=None):
+     audio_fp = file_upload if "upload" in str(audio_source or "").lower() else microphone
+     if audio_fp is None:
+         return "ERROR: You have to either use the microphone or upload an audio file"
+
+     audio_samples = librosa.load(audio_fp, sr=LID_SAMPLING_RATE, mono=True)[0]
+     inputs = processor(audio_samples, sampling_rate=LID_SAMPLING_RATE, return_tensors="pt")
+
+     # Set device
+     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+     model.to(device)
+     inputs = inputs.to(device)
+
+     with torch.no_grad():
+         logit = model(**inputs).logits
+
+     logit_lsm = torch.log_softmax(logit.squeeze(), dim=-1)
+     scores, indices = torch.topk(logit_lsm, 5, dim=-1)
+     scores, indices = torch.exp(scores).to("cpu").tolist(), indices.to("cpu").tolist()
+     iso2score = {model.config.id2label[int(i)]: s for s, i in zip(scores, indices)}
+     if max(iso2score.values()) < LID_THRESHOLD:
+         return "Low confidence in the language identification predictions. Output is not shown!"
+     return {LID_LANGUAGES[iso]: score for iso, score in iso2score.items()}
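
For context, a minimal usage sketch of the function added in this commit, called outside a Gradio UI. The file name "sample.wav" and the audio_source string "Audio file upload" are illustrative assumptions, not part of the commit; the function only requires that audio_source contain "upload" for the uploaded-file branch to be taken, and that data/lid/all_langs.tsv exists relative to the working directory.

  # Hypothetical caller for lid.py (assumes lid.py is importable and its
  # data/lid/all_langs.tsv lookup file is present).
  from lid import identify_language

  # "sample.wav" is a placeholder path to a local audio file; librosa
  # resamples it to 16 kHz inside identify_language.
  result = identify_language(audio_source="Audio file upload", file_upload="sample.wav")
  print(result)  # {language name: probability} for the top-5 languages, or an error/low-confidence string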