eswardivi committed on
Commit 1423f0e
1 Parent(s): 64d8a64

Update app.py

Files changed (1): app.py (+2 -2)
app.py CHANGED
@@ -6,11 +6,11 @@ from threading import Thread
 import spaces
 
 token = os.environ["HF_TOKEN"]
-model = AutoModelForCausalLM.from_pretrained("google/gemma-2b-it",
+model = AutoModelForCausalLM.from_pretrained("google/gemma-1.1-2b-it",
     # torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
     torch_dtype=torch.float16,
     token=token)
-tok = AutoTokenizer.from_pretrained("google/gemma-2b-it",token=token)
+tok = AutoTokenizer.from_pretrained("google/gemma-1.1-2b-it",token=token)
 # using CUDA for an optimal experience
 # device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
 if torch.cuda.is_available():
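For reference, a minimal self-contained sketch of how the updated lines load the model and tokenizer after this change. The imports and the body of the CUDA branch are assumptions based on the surrounding app.py context visible in the hunk; they are not part of this commit.

import os
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Gated Gemma checkpoints require an access token; app.py reads it from the environment.
token = os.environ["HF_TOKEN"]

# Load the updated checkpoint in fp16, as in the diff.
model = AutoModelForCausalLM.from_pretrained(
    "google/gemma-1.1-2b-it",
    torch_dtype=torch.float16,
    token=token,
)
tok = AutoTokenizer.from_pretrained("google/gemma-1.1-2b-it", token=token)

# Assumed body of the `if torch.cuda.is_available():` branch: move the model to the GPU.
if torch.cuda.is_available():
    model = model.to("cuda")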