Binh Nguyen committed
Commit e600dee • Parent: 673507e

add torchaudio

Files changed (2):
  1. app.py +14 -7
  2. requirements.txt +1 -2
app.py CHANGED
@@ -3,9 +3,9 @@ from transformers.file_utils import cached_path, hf_bucket_url
 import os, zipfile
 from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC
 from datasets import load_dataset
-import soundfile as sf
 import torch
 import kenlm
+import torchaudio
 from pyctcdecode import Alphabet, BeamSearchDecoderCTC, LanguageModel
 
 cache_dir = './cache/'
@@ -17,7 +17,6 @@ with zipfile.ZipFile(lm_file, 'r') as zip_ref:
     zip_ref.extractall(cache_dir)
 lm_file = cache_dir + 'vi_lm_4grams.bin'
 
-
 def get_decoder_ngram_model(tokenizer, ngram_lm_path):
     vocab_dict = tokenizer.get_vocab()
     sort_vocab = sorted((value, key) for (key, value) in vocab_dict.items())
@@ -41,17 +40,25 @@ def get_decoder_ngram_model(tokenizer, ngram_lm_path):
 ngram_lm_model = get_decoder_ngram_model(processor.tokenizer, lm_file)
 
 # define function to read in sound file
-def map_to_array(batch):
-    speech, sampling_rate = sf.read(batch["file"])
-    batch["speech"] = speech
-    batch["sampling_rate"] = sampling_rate
+def speech_file_to_array_fn(path, max_seconds=10):
+    batch = {"file": path}
+    speech_array, sampling_rate = torchaudio.load(batch["file"])
+    if sampling_rate != 16000:
+        transform = torchaudio.transforms.Resample(orig_freq=sampling_rate,
+                                                   new_freq=16000)
+        speech_array = transform(speech_array)
+    speech_array = speech_array[0]
+    if max_seconds > 0:
+        speech_array = speech_array[:max_seconds * 16000]
+    batch["speech"] = speech_array.numpy()
+    batch["sampling_rate"] = 16000
     return batch
 
 # tokenize
 def inference(audio):
     # read in sound file
     # load dummy dataset and read soundfiles
-    ds = map_to_array({"file": audio})
+    ds = speech_file_to_array_fn(audio)
     # infer model
     input_values = processor(
         ds["speech"],
@@ -59,7 +66,7 @@ def inference(audio):
         return_tensors="pt"
     ).input_values
     # decode ctc output
     logits = model(input_values).logits[0]
     pred_ids = torch.argmax(logits, dim=-1)
     greedy_search_output = processor.decode(pred_ids)
     beam_search_output = ngram_lm_model.decode(logits.cpu().detach().numpy(), beam_width=500)
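
The heart of the change is `speech_file_to_array_fn`: torchaudio loads the upload, resamples anything that is not already at the 16 kHz rate the Wav2Vec2 checkpoint expects, keeps a single channel, and caps the clip at `max_seconds`. A standalone sanity check of that resample-and-truncate logic on a synthetic clip (the tone and variable names below are illustrative, not part of the commit):

    import math
    import torch
    import torchaudio

    # A synthetic 3-second stereo 440 Hz tone at 44.1 kHz stands in for a real upload.
    sr = 44100
    t = torch.arange(3 * sr) / sr
    clip = torch.stack([torch.sin(2 * math.pi * 440 * t)] * 2)  # shape [2, 132300]

    # Same steps as the new loader: resample to 16 kHz, keep channel 0, cap at 10 s.
    resampled = torchaudio.transforms.Resample(orig_freq=sr, new_freq=16000)(clip)
    mono = resampled[0][:10 * 16000]

    print(mono.shape)  # torch.Size([48000]): 3 s at 16 kHz, well under the 10 s cap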
requirements.txt CHANGED
@@ -1,7 +1,6 @@
 torch==1.9.0
+torchaudio==0.9.0
 transformers==4.9.2
-soundfile
-librosa
 datasets==1.11.0
 pyctcdecode==v0.1.0
 https://github.com/kpu/kenlm/archive/master.zip
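
The two pins are a matched pair: torchaudio 0.9.0 is the release built against torch 1.9.0, so bumping one without the other tends to fail at import time. A minimal environment check (illustrative only):

    import torch
    import torchaudio

    # torchaudio 0.9.x is built against torch 1.9.x; the versions should line up.
    print(torch.__version__)       # expected: 1.9.0
    print(torchaudio.__version__)  # expected: 0.9.0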