sidpanda committed · Commit 6011925 · verified · 1 Parent(s): 3f8ce4f

test gemma

Files changed (1): app.py (+13 -9)
app.py CHANGED

@@ -16,16 +16,20 @@ import librosa
 import pyloudnorm as pyln
 
 # Load the models and tokenizers
-model1 = Wav2Vec2ForCTC.from_pretrained("ai4bharat/indicwav2vec-hindi")
-tokenizer1 = Wav2Vec2Tokenizer.from_pretrained("ai4bharat/indicwav2vec-hindi")
+# model1 = Wav2Vec2ForCTC.from_pretrained("ai4bharat/indicwav2vec-hindi")
+# tokenizer1 = Wav2Vec2Tokenizer.from_pretrained("ai4bharat/indicwav2vec-hindi")
+model1 = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-large-960h")
+tokenizer1 = Wav2Vec2Tokenizer.from_pretrained("facebook/wav2vec2-large-960h")
 # Loading the tokenizer and model from Hugging Face's model hub.
-tokenizer = AutoTokenizer.from_pretrained("soketlabs/pragna-1b", token=os.environ.get('HF_TOKEN'))
-model = AutoModelForCausalLM.from_pretrained(
-    "soketlabs/pragna-1b",
-    token=os.environ.get('HF_TOKEN'),
-    revision='3c5b8b1309f7d89710331ba2f164570608af0de7'
-)
-model.load_adapter('soketlabs/pragna-1b-it-v0.1', token=os.environ.get('HF_TOKEN'))
+tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b")
+model = AutoModelForCausalLM.from_pretrained("google/gemma-2b", token=os.environ.get('HF_TOKEN'))
+# tokenizer = AutoTokenizer.from_pretrained("soketlabs/pragna-1b", token=os.environ.get('HF_TOKEN'))
+# model = AutoModelForCausalLM.from_pretrained(
+#     "soketlabs/pragna-1b",
+#     token=os.environ.get('HF_TOKEN'),
+#     revision='3c5b8b1309f7d89710331ba2f164570608af0de7'
+# )
+# model.load_adapter('soketlabs/pragna-1b-it-v0.1', token=os.environ.get('HF_TOKEN'))
 
 # using CUDA for an optimal experience
 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
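
For context, a minimal sketch of how the swapped-in checkpoints could be exercised downstream. The rest of app.py is not shown in this hunk, so the transcribe_and_generate helper, the audio_path/max_new_tokens parameters, and the use of the transcription as a plain-text prompt are assumptions for illustration; model1, tokenizer1, model, tokenizer, and device come from the code above.

import torch
import librosa

# Hypothetical helper (not part of this commit): run CTC transcription with
# the wav2vec2 model, then continue the transcript with the Gemma LM.
def transcribe_and_generate(audio_path, max_new_tokens=64):
    # facebook/wav2vec2-large-960h expects 16 kHz mono audio
    speech, _ = librosa.load(audio_path, sr=16000)
    input_values = tokenizer1(speech, return_tensors="pt").input_values.to(model1.device)
    with torch.no_grad():
        logits = model1(input_values).logits
    predicted_ids = torch.argmax(logits, dim=-1)
    transcription = tokenizer1.batch_decode(predicted_ids)[0]

    # feed the transcription to google/gemma-2b as a plain-text prompt
    inputs = tokenizer(transcription, return_tensors="pt").to(model.device)
    with torch.no_grad():
        output_ids = model.generate(**inputs, max_new_tokens=max_new_tokens)
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)

Note that google/gemma-2b is a gated repository, so the HF_TOKEN passed to AutoModelForCausalLM.from_pretrained above must belong to an account that has accepted the model's license.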