Futuresony committed on
Commit
0b5f65d
·
verified ·
1 Parent(s): c2f7063

Update asr.py

Browse files
Files changed (1) hide show
  1. asr.py +44 -45
asr.py CHANGED
@@ -1,59 +1,58 @@
1
import librosa
import torch
import numpy as np
import langid  # Language detection library
from transformers import Wav2Vec2ForCTC, AutoProcessor

# Sample rate the MMS ASR model expects; all audio is converted to this.
ASR_SAMPLING_RATE = 16_000
MODEL_ID = "facebook/mms-1b-all"

# Load MMS Model once at import time so every transcription reuses it.
processor = AutoProcessor.from_pretrained(MODEL_ID)
model = Wav2Vec2ForCTC.from_pretrained(MODEL_ID)
model.eval()  # inference mode: disables dropout, etc.
 
 
 
 
14
 
15
def detect_language(text):
    """Detects language using langid (fast & lightweight).

    Returns the ISO 639-1 code "en" or "sw"; any other detected
    language falls back to "en".
    """
    lang, _ = langid.classify(text)
    return lang if lang in ["en", "sw"] else "en"  # Default to English
19
 
20
def transcribe_auto(audio_data=None):
    """Run MMS ASR on *audio_data*, auto-selecting the English/Swahili adapter.

    Accepts either a ``(sample_rate, samples)`` tuple (microphone input) or a
    path to an audio file. Returns a formatted result string, or an
    ``<<ERROR: ...>>`` string for empty/invalid input.
    """
    if not audio_data:
        return "<<ERROR: Empty Audio Input>>"

    if isinstance(audio_data, tuple):
        # Microphone input: scale int16 PCM to float32 and resample if needed.
        rate, samples = audio_data
        samples = (samples / 32768.0).astype(np.float32)
        if rate != ASR_SAMPLING_RATE:
            samples = librosa.resample(samples, orig_sr=rate, target_sr=ASR_SAMPLING_RATE)
    elif isinstance(audio_data, str):
        # File upload: librosa decodes, downmixes and resamples in one call.
        samples = librosa.load(audio_data, sr=ASR_SAMPLING_RATE, mono=True)[0]
    else:
        return "<<ERROR: Invalid Audio Input>>"

    features = processor(samples, sampling_rate=ASR_SAMPLING_RATE, return_tensors="pt")

    def _greedy_decode():
        # Greedy CTC decode with whatever adapter the model currently holds.
        with torch.no_grad():
            logits = model(**features).logits
        token_ids = torch.argmax(logits, dim=-1)[0]
        return processor.decode(token_ids)

    # Pass 1: transcribe without language-specific adapter selection.
    first_pass = _greedy_decode()

    # Pass 2 setup: choose the adapter that matches the detected language.
    detected_lang = detect_language(first_pass)
    lang_code = "eng" if detected_lang == "en" else "swh"
    processor.tokenizer.set_target_lang(lang_code)
    model.load_adapter(lang_code)

    # Pass 2: transcribe again with the correct adapter loaded.
    final_transcription = _greedy_decode()

    return f"Detected Language: {detected_lang.upper()}\n\nTranscription:\n{final_transcription}"
 
1
import librosa
import torch
import numpy as np
import langid
from transformers import Wav2Vec2ForCTC, AutoProcessor

# Sample rate the MMS ASR model expects; all audio is converted to this.
ASR_SAMPLING_RATE = 16_000
MODEL_ID = "facebook/mms-1b-all"  # Or your model ID

# Load MMS Model (outside the function, for efficiency)
try:
    processor = AutoProcessor.from_pretrained(MODEL_ID)
    model = Wav2Vec2ForCTC.from_pretrained(MODEL_ID)
    model.eval()
except Exception as e:
    print(f"Error loading initial model: {e}")  # Handle initial model loading errors
    # NOTE(review): exit(1) in a module that other code imports kills the
    # whole process on load failure — consider re-raising instead.
    exit(1)  # Or raise the exception if you prefer
18
 
19
def detect_language(text):
    """Classify *text* with langid; unsupported languages fall back to English."""
    code = langid.classify(text)[0]
    return code if code in ("en", "sw") else "en"
22
 
23
def transcribe_auto(audio_data=None):
    """Transcribe audio with MMS, auto-selecting the English or Swahili adapter.

    Args:
        audio_data: Either a ``(sample_rate, samples)`` tuple (e.g. from a
            microphone widget; samples assumed int16 PCM — TODO confirm) or a
            path to an audio file.

    Returns:
        A formatted string with the detected language and transcription, or
        an ``<<ERROR: ...>>`` string on empty/invalid input or failure.
    """
    if not audio_data:
        return "<<ERROR: Empty Audio Input>>"

    # BUG FIX: the previous revision replaced this preprocessing with a
    # placeholder comment, leaving `inputs` undefined (NameError on every
    # call, swallowed by the broad except below). Restored from the prior
    # working version.
    if isinstance(audio_data, tuple):
        # Microphone input: scale int16 PCM to [-1, 1] float32, resample if needed.
        sr, audio_samples = audio_data
        audio_samples = (audio_samples / 32768.0).astype(np.float32)
        if sr != ASR_SAMPLING_RATE:
            audio_samples = librosa.resample(audio_samples, orig_sr=sr, target_sr=ASR_SAMPLING_RATE)
    else:
        # File upload input: expect a filesystem path string.
        if not isinstance(audio_data, str):
            return "<<ERROR: Invalid Audio Input>>"
        audio_samples = librosa.load(audio_data, sr=ASR_SAMPLING_RATE, mono=True)[0]

    try:  # Wrap the entire transcription process
        inputs = processor(audio_samples, sampling_rate=ASR_SAMPLING_RATE, return_tensors="pt")

        # **Step 1: Transcribe without Language Detection**
        with torch.no_grad():
            outputs = model(**inputs).logits
        ids = torch.argmax(outputs, dim=-1)[0]
        raw_transcription = processor.decode(ids)

        # **Step 2: Detect Language from Transcription**
        detected_lang = detect_language(raw_transcription)
        lang_code = "eng" if detected_lang == "en" else "swh"

        # **Step 3: Reload Model with Correct Adapter (CRITICAL CHANGE)**
        try:  # Wrap adapter loading
            processor.tokenizer.set_target_lang(lang_code)
            model.load_adapter(lang_code)  # This is the most likely source of errors
        except Exception as adapter_error:  # Catch adapter loading errors
            print(f"Error loading adapter for {detected_lang}: {adapter_error}")
            return f"<<ERROR: Could not load adapter for {detected_lang}>>"  # Or raise

        # **Step 4: Transcribe Again with Correct Adapter**
        with torch.no_grad():
            outputs = model(**inputs).logits
        ids = torch.argmax(outputs, dim=-1)[0]
        final_transcription = processor.decode(ids)

        return f"Detected Language: {detected_lang.upper()}\n\nTranscription:\n{final_transcription}"

    except Exception as overall_error:  # Catch any other errors during transcription
        print(f"An error occurred during transcription: {overall_error}")
        return f"<<ERROR: {overall_error}>>"