tanyuzhou committed on
Commit e514fc1 • 1 Parent(s): 45189bd

Update app.py

Files changed (1): app.py (+7 -14)
app.py CHANGED
@@ -1,15 +1,11 @@
 import gradio as gr
 
-import torch
-from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
-from transformers import TextStreamer
-import transformers
+from transformers import AutoModelForCausalLM, AutoTokenizer
 
 import spaces
-
-@spaces.GPU
-def load_pretrained_model():
-    return AutoModelForCausalLM.from_pretrained("Rorical/0-roleplay", return_dict=True, trust_remote_code=True)
+
+tokenizer = AutoTokenizer.from_pretrained("Rorical/0-roleplay", trust_remote_code=True)
+tokenizer.chat_template = "{% for message in messages %}{{'<|im_start|>' + ((message['role'] + ':\n') if message['role'] != '' else '') + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>星野:\n' }}{% endif %}"  # Be careful: this model uses a custom chat template.
 
 # Define the response function
 @spaces.GPU
@@ -21,8 +17,8 @@ def respond(
     temperature,
     top_p,
 ):
-    global model, tokenizer
-
+    model = AutoModelForCausalLM.from_pretrained("Rorical/0-roleplay", return_dict=True, trust_remote_code=True)
+
     # Construct the messages for the chat
     messages = [{"role": "", "content": system_message}]
     for user_message, bot_response in history:
@@ -83,10 +79,7 @@ demo = gr.ChatInterface(
 )
 
 if __name__ == "__main__":
-    global model, tokenizer
-
-    tokenizer = AutoTokenizer.from_pretrained("Rorical/0-roleplay", trust_remote_code=True)
-    tokenizer.chat_template = "{% for message in messages %}{{'<|im_start|>' + ((message['role'] + ':\n') if message['role'] != '' else '') + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>星野:\n' }}{% endif %}"  # Be careful: this model uses a custom chat template.
+    global model
 
     model = load_pretrained_model()
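
The "custom chat template" warning in the diff is the subtle part: the template is plain Jinja set on the tokenizer by hand, an empty role renders with no "role:" prefix, and the generation prompt opens the reply as the character 星野. A minimal sketch of what the template produces; the message list and the printed output below are illustrative assumptions, not part of the commit:

# A minimal sketch of how the custom template renders (the messages and the
# printed output are illustrative assumptions, not part of the commit).
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Rorical/0-roleplay", trust_remote_code=True)
tokenizer.chat_template = (
    "{% for message in messages %}"
    "{{'<|im_start|>' + ((message['role'] + ':\n') if message['role'] != '' else '') "
    "+ message['content'] + '<|im_end|>' + '\n'}}"
    "{% endfor %}"
    "{% if add_generation_prompt %}{{ '<|im_start|>星野:\n' }}{% endif %}"
)

messages = [
    {"role": "", "content": "Stay in character."},  # empty role: no "role:" prefix
    {"role": "user", "content": "Hello!"},
]
print(tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True))
# <|im_start|>Stay in character.<|im_end|>
# <|im_start|>user:
# Hello!<|im_end|>
# <|im_start|>星野: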
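
The other half of the change concerns where the model loads. @spaces.GPU is the Hugging Face ZeroGPU decorator: a GPU is attached only while a decorated call runs, which is why the commit moves AutoModelForCausalLM.from_pretrained inside respond. A sketch of the more common arrangement under the same API, loading once at import instead of on every chat turn; the 星野 history role and the generation details are assumptions, not code from the commit:

import spaces
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_ID = "Rorical/0-roleplay"
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
# set tokenizer.chat_template exactly as in the diff above
model = AutoModelForCausalLM.from_pretrained(MODEL_ID, return_dict=True, trust_remote_code=True)
model.to("cuda")  # under ZeroGPU, module-level CUDA moves are deferred to GPU calls

@spaces.GPU
def respond(message, history, system_message, max_tokens, temperature, top_p):
    # Rebuild the conversation in the model's custom ChatML-style format.
    messages = [{"role": "", "content": system_message}]
    for user_message, bot_response in history:
        messages.append({"role": "user", "content": user_message})
        messages.append({"role": "星野", "content": bot_response})
    messages.append({"role": "user", "content": message})

    input_ids = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    ).to(model.device)
    output = model.generate(
        input_ids,
        max_new_tokens=max_tokens,
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
    )
    # Return only the newly generated tokens.
    return tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True)

Loading once at import also sidesteps the leftover model = load_pretrained_model() context line at the bottom of the new file, which as committed refers to a helper this same diff deletes.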