injilashah commited on
Commit
97d06ed
·
verified ·
1 Parent(s): 88d01fb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -6,8 +6,8 @@ hf_token = os.environ["hf_token"]
6
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
7
 
8
 
9
- b_tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-7b1")#using small parameter version of model for faster inference on hf
10
- b_model = AutoModelForCausalLM.from_pretrained("bigscience/bloom-7b1",device_map = "auto")
11
 
12
 g_tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-2b",token = hf_token)#using small parameter version of model for faster inference on hf
13
  g_model = AutoModelForCausalLM.from_pretrained("google/gemma-2-2b",token = hf_token)
 
6
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
7
 
8
 
9
+ b_tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-1b1")#using small parameter version of model for faster inference on hf
10
+ b_model = AutoModelForCausalLM.from_pretrained("bigscience/bloom-1b1",device_map = "auto")
11
 
12
 g_tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-2b",token = hf_token)#using small parameter version of model for faster inference on hf
13
  g_model = AutoModelForCausalLM.from_pretrained("google/gemma-2-2b",token = hf_token)