Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -18,12 +18,11 @@ quantization_config = BitsAndBytesConfig(
 )
 
 model = AutoModelForCausalLM.from_pretrained(
-    "
+    "microsoft/Phi-3-mini-128k-instruct", quantization_config=quantization_config, token=token
 )
-tok = AutoTokenizer.from_pretrained("
+tok = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-128k-instruct", token=token)
 terminators = [
     tok.eos_token_id,
-    tok.convert_tokens_to_ids("<|eot_id|>")
 ]
 
 if torch.cuda.is_available():
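The removed lines are truncated in this view, so the previous model id is not recoverable here; what the commit clearly does is point both the model and the tokenizer at microsoft/Phi-3-mini-128k-instruct and drop the Llama-3-style `<|eot_id|>` terminator, which is not part of Phi-3's chat format. The `BitsAndBytesConfig` arguments sit above the visible hunk, so the flags in the following sketch are assumptions rather than the Space's exact settings:

```python
# Sketch of the setup this hunk lands in. The BitsAndBytesConfig arguments
# are outside the visible hunk, so the flags below are assumptions.
import os

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

token = os.environ.get("HF_TOKEN")  # assumption: token comes from a Space secret

quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,                      # matches the "in 4bit" description
    bnb_4bit_compute_dtype=torch.bfloat16,  # assumed compute dtype
)

model = AutoModelForCausalLM.from_pretrained(
    "microsoft/Phi-3-mini-128k-instruct",
    quantization_config=quantization_config,
    token=token,
)
tok = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-128k-instruct", token=token)

# Phi-3 ends turns with its own EOS token; the Llama-3-style <|eot_id|>
# terminator removed by this commit would not map to a real terminator here.
terminators = [tok.eos_token_id]
```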
@@ -37,7 +36,7 @@ else:
 # Dispatch Errors
 
 
-@spaces.GPU(duration=
+@spaces.GPU(duration=60)
 def chat(message, history, temperature,do_sample, max_tokens):
     start_time = time.time()
     chat = []
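The `@spaces.GPU` decorator is what lets this Space run on ZeroGPU ("Running on Zero"): a GPU is attached only while a decorated function executes, and `duration=60` requests roughly 60 seconds of GPU time per call. A minimal sketch of the pattern:

```python
# Minimal ZeroGPU sketch: on Zero hardware a GPU is attached only while a
# @spaces.GPU-decorated function runs; duration=60 requests ~60 s per call.
import spaces

@spaces.GPU(duration=60)
def gpu_step(prompt):  # hypothetical function name, for illustration
    # CUDA is available inside the decorated call on ZeroGPU Spaces;
    # model.generate(...) and other GPU-bound work belong here.
    ...
```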
@@ -49,7 +48,7 @@ def chat(message, history, temperature,do_sample, max_tokens):
     messages = tok.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
     model_inputs = tok([messages], return_tensors="pt").to(device)
     streamer = TextIteratorStreamer(
-        tok, timeout=
+        tok, timeout=20.0, skip_prompt=True, skip_special_tokens=True
     )
     generate_kwargs = dict(
         model_inputs,
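The hunk cuts off inside `generate_kwargs`, but `TextIteratorStreamer` is normally paired with `model.generate` running on a background thread while the handler yields partial text: `timeout=20.0` bounds how long each read from the streamer blocks, and `skip_prompt`/`skip_special_tokens` keep the echoed prompt and special tokens out of the chat window. A sketch of how the rest of `chat()` presumably continues; the helper name and any kwargs beyond the visible diff are illustrative assumptions:

```python
# Streaming pattern assumed by the hunk above: generate() runs on a worker
# thread while the handler yields growing partial strings to Gradio.
from threading import Thread

from transformers import TextIteratorStreamer

def stream_reply(model, tok, model_inputs, terminators, temperature, do_sample, max_tokens):
    streamer = TextIteratorStreamer(
        tok, timeout=20.0, skip_prompt=True, skip_special_tokens=True
    )
    generate_kwargs = dict(
        model_inputs,              # BatchEncoding: contributes input_ids/attention_mask
        streamer=streamer,
        max_new_tokens=max_tokens,
        do_sample=do_sample,
        temperature=temperature,
        eos_token_id=terminators,
    )
    Thread(target=model.generate, kwargs=generate_kwargs).start()
    partial = ""
    for chunk in streamer:         # each read blocks for at most timeout=20.0 s
        partial += chunk
        yield partial              # the chat window re-renders each partial string
```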
@@ -105,6 +104,6 @@ demo = gr.ChatInterface(
     ],
     stop_btn="Stop Generation",
     title="Chat With LLMs",
-    description="Now Running [
+    description="Now Running [microsoft/Phi-3-mini-128k-instruct](https://huggingface.com/microsoft/Phi-3-mini-128k-instruct) in 4bit"
 )
 demo.launch()
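Only the tail of the `gr.ChatInterface` call is visible in the last hunk. A hedged reconstruction of its likely shape: the `additional_inputs` widgets and their ranges below are illustrative assumptions matching `chat()`'s extra parameters, while `stop_btn`, `title`, and `description` are taken from the diff:

```python
# Plausible shape of the gr.ChatInterface call; additional_inputs must line
# up with chat()'s extra parameters (temperature, do_sample, max_tokens).
# Widget labels and ranges here are assumptions, not the Space's exact values.
import gradio as gr

demo = gr.ChatInterface(
    fn=chat,
    additional_inputs=[
        gr.Slider(0.0, 1.0, value=0.7, label="Temperature"),      # assumed range
        gr.Checkbox(value=True, label="Sampling"),                # maps to do_sample
        gr.Slider(128, 4096, value=512, label="Max new tokens"),  # assumed range
    ],
    stop_btn="Stop Generation",
    title="Chat With LLMs",
    description="Now Running [microsoft/Phi-3-mini-128k-instruct](https://huggingface.com/microsoft/Phi-3-mini-128k-instruct) in 4bit",
)
demo.launch()
```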