Update app.py
app.py CHANGED
@@ -1,19 +1,3 @@
-from transformers import AutoTokenizer, AutoModelForCausalLM
-import transformers
-import torch
 import gradio as gr
 
-
-dtype = torch.bfloat16
-
-tokenizer = AutoTokenizer.from_pretrained(model_id)
-model = AutoModelForCausalLM.from_pretrained(
-    model_id,
-    device_map="cuda",
-    torch_dtype=dtype,
-)
-
-chat = [
-    { "role": "user", "content": "Write a hello world program" },
-]
-prompt = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
+gr.load("models/google/gemma-7b-it").launch()
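
Note: the removed code referenced model_id without defining it anywhere in the file. A minimal sketch of a runnable version of that local-loading approach, assuming model_id was meant to be "google/gemma-7b-it" (the same model the new gr.load() call targets):

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Assumption: model_id was never defined in the removed file; "google/gemma-7b-it"
# matches the model referenced by the replacement gr.load() line.
model_id = "google/gemma-7b-it"
dtype = torch.bfloat16

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="cuda",
    torch_dtype=dtype,
)

chat = [
    {"role": "user", "content": "Write a hello world program"},
]
prompt = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)

# The removed file stopped at building the prompt; generating a reply would
# look roughly like this:
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=256)
print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True))

The replacement line, gr.load("models/google/gemma-7b-it").launch(), instead builds the demo on top of the hosted model via the Hugging Face Inference API, so the Space itself no longer needs to download the weights or run them on a GPU.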