Update app.py
app.py
CHANGED
@@ -1,14 +1,15 @@
 import gradio as gr
 import torch
-from transformers import AutoModelForCausalLM, AutoTokenizer
+from transformers import AutoModelForCausalLM, AutoTokenizer, AutoModel
 from transformers import StoppingCriteria, StoppingCriteriaList, TextIteratorStreamer
 from threading import Thread
 
 # Loading the tokenizer and model from Hugging Face's model hub.
-#
-
-tokenizer = AutoTokenizer.from_pretrained(model_name,trust_remote_code=True)
-model = AutoModelForCausalLM.from_pretrained(model_name,trust_remote_code=True)
+# model_name_or_path = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
+model_name_or_path = "scutcyr/BianQue-2"
+tokenizer = AutoTokenizer.from_pretrained(model_name_or_path,trust_remote_code=True)
+# model = AutoModelForCausalLM.from_pretrained(model_name,trust_remote_code=True)
+model = AutoModel.from_pretrained(model_name_or_path, trust_remote_code=True).half()
 
 # using CUDA for an optimal experience
 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
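For reference, a minimal sketch of how the newly loaded model might be driven, assuming BianQue-2 (a ChatGLM-6B derivative) exposes the ChatGLM-style chat() method through trust_remote_code=True; the sampling parameters and the sample prompt below are illustrative assumptions, not part of this change:

import torch
from transformers import AutoModel, AutoTokenizer

model_name_or_path = "scutcyr/BianQue-2"
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
# .half() keeps the weights in fp16, halving memory; practical mainly on GPU.
model = AutoModel.from_pretrained(model_name_or_path, trust_remote_code=True).half()

# Place the model on GPU when available, mirroring the device selection above.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = model.to(device).eval()

# Assumed ChatGLM-style chat API: `history` is a list of (query, answer) pairs.
response, history = model.chat(
    tokenizer,
    query="我最近总是失眠，该怎么办？",  # "I keep having insomnia lately; what should I do?"
    history=[],
    max_length=2048,
    top_p=0.7,
    temperature=0.95,
)
print(response)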