Spaces:
Running
on
Zero
Running
on
Zero
alfredplpl
committed on
Commit
•
87eb439
1
Parent(s):
0ee3851
Update app.py
Browse files
app.py
CHANGED
@@ -8,7 +8,7 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStream
|
|
8 |
from threading import Thread
|
9 |
import torch
|
10 |
|
11 |
-
|
12 |
|
13 |
DESCRIPTION = '''
|
14 |
<div>
|
@@ -47,11 +47,12 @@ h1 {
|
|
47 |
"""
|
48 |
|
49 |
# Load the tokenizer and model
|
50 |
-
tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-2b-jpn-it")
|
51 |
model = AutoModelForCausalLM.from_pretrained(
|
52 |
"google/gemma-2-2b-jpn-it",
|
53 |
device_map="auto",
|
54 |
torch_dtype=torch.bfloat16,
|
|
|
55 |
)
|
56 |
|
57 |
@spaces.GPU()
|
|
|
8 |
from threading import Thread
|
9 |
import torch
|
10 |
|
11 |
+
HF_TOKEN=os.environ["TOKEN"]
|
12 |
|
13 |
DESCRIPTION = '''
|
14 |
<div>
|
|
|
47 |
"""
|
48 |
|
49 |
# Load the tokenizer and model
|
50 |
+
tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-2b-jpn-it",token=HF_TOKEN)
|
51 |
model = AutoModelForCausalLM.from_pretrained(
|
52 |
"google/gemma-2-2b-jpn-it",
|
53 |
device_map="auto",
|
54 |
torch_dtype=torch.bfloat16,
|
55 |
+
token=HF_TOKEN
|
56 |
)
|
57 |
|
58 |
@spaces.GPU()
|