samuelleecong committed on
Commit
43f3b59
1 Parent(s): b04a665

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -5
app.py CHANGED
@@ -5,8 +5,8 @@ from datasets import load_dataset
5
 
6
  from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor, pipeline
7
 
8
- from transformers import M2M100ForConditionalGeneration
9
- from tokenization_small100 import SMALL100Tokenizer
10
 
11
 
12
  device = "cuda:0" if torch.cuda.is_available() else "cpu"
@@ -16,10 +16,9 @@ device = "cuda:0" if torch.cuda.is_available() else "cpu"
16
 
17
  asr_pipe = pipeline("automatic-speech-recognition", model="openai/whisper-base", device=device, generate_kwargs = {"task": "translate"})
18
 
19
- model = M2M100ForConditionalGeneration.from_pretrained("alirezamsh/small100")
20
- tokenizer = SMALL100Tokenizer.from_pretrained("alirezamsh/small100", tgt_lang="sw")
21
-
22
 
 
 
23
 
24
  # load text-to-speech checkpoint and speaker embeddings
25
  # processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
 
5
 
6
  from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor, pipeline
7
 
8
+ from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
9
+ # from tokenization_small100 import SMALL100Tokenizer
10
 
11
 
12
  device = "cuda:0" if torch.cuda.is_available() else "cpu"
 
16
 
17
  asr_pipe = pipeline("automatic-speech-recognition", model="openai/whisper-base", device=device, generate_kwargs = {"task": "translate"})
18
 
 
 
 
19
 
20
+ model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
21
+ tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")
22
 
23
  # load text-to-speech checkpoint and speaker embeddings
24
  # processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")