eswardivi committed
Commit ab4d817
1 Parent(s): fca3d9e

Update app.py

Files changed (1)
  1. app.py +2 -2
app.py CHANGED
@@ -10,7 +10,7 @@ model = AutoModelForCausalLM.from_pretrained("google/gemma-1.1-7b-it",
     # torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
     torch_dtype=torch.float16,
     token=token)
-tok = AutoTokenizer.from_pretrained("google/gemma-1.1-7b-it,token=token)
+tok = AutoTokenizer.from_pretrained("google/gemma-1.1-7b-it",token=token)
 # using CUDA for an optimal experience
 # device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
 if torch.cuda.is_available():
@@ -58,5 +58,5 @@ def chat(message, history):



-demo = gr.ChatInterface(fn=chat, examples=[["Write me a poem about Machine Learning."]], title="gemma 2b-it")
+demo = gr.ChatInterface(fn=chat, examples=[["Write me a poem about Machine Learning."]], title="gemma-1.1-7b-it")
 demo.launch()
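For reference, below is a minimal, self-contained sketch of how the two corrected lines fit together. It is an illustration rather than the full app.py: the token placeholder and the simplified chat() body are assumptions, and the real app may read its token and build its prompt differently.

import torch
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

token = "hf_..."  # placeholder; supply a Hugging Face token with Gemma access

model = AutoModelForCausalLM.from_pretrained(
    "google/gemma-1.1-7b-it",
    torch_dtype=torch.float16,
    token=token,
)
# The fix in this commit: the model id string is properly closed before the token kwarg.
tok = AutoTokenizer.from_pretrained("google/gemma-1.1-7b-it", token=token)

# Move the model to GPU when available (the hard-coded float16 weights expect a CUDA device).
if torch.cuda.is_available():
    model = model.to("cuda")

def chat(message, history):
    # Simplified stand-in for the app's chat function: apply Gemma's chat
    # template, generate a reply, and return only the newly generated text.
    messages = [{"role": "user", "content": message}]
    input_ids = tok.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    ).to(model.device)
    output_ids = model.generate(input_ids, max_new_tokens=256)
    return tok.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True)

# The second fix: the interface title now matches the model actually being served.
demo = gr.ChatInterface(
    fn=chat,
    examples=[["Write me a poem about Machine Learning."]],
    title="gemma-1.1-7b-it",
)
demo.launch()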