Tonic committed on
Commit
5628f77
1 Parent(s): 7a71692

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -6
app.py CHANGED
@@ -12,11 +12,11 @@ description = "Interestingly there simply wasnt a public demo for Tulu, So I mad
12
 
13
  os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:50'
14
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
15
- model_name = "allenai/tulu-2-dpo-70b"
16
- tokenizer = AutoTokenizer.from_pretrained("allenai/tulu-2-dpo-70b")
17
  model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.bfloat16, use_flash_attention_2=True, device_map="auto")
18
 
19
- class XgenChatBot:
20
  def __init__(self, model, tokenizer, system_message="You are 🌷Tulu, an AI language model created by Tonic-AI. You are a cautious assistant. You carefully follow instructions. You are helpful and harmless and you follow ethical guidelines and promote positive behavior."):
21
  self.model = model
22
  self.tokenizer = tokenizer
@@ -47,11 +47,11 @@ class XgenChatBot:
47
  return response
48
 
49
  def gradio_predict(user_message, system_message, max_new_tokens, temperature, top_p, repetition_penalty):
50
- Orca_bot.set_system_message(system_message)
51
- response = Orca_bot.predict(user_message, temperature, max_new_tokens, top_p, repetition_penalty)
52
  return response
53
 
54
- Orca_bot = OrcaChatBot(model, tokenizer)
55
 
56
  iface = gr.Interface(
57
  fn=gradio_predict,
 
12
 
13
  os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:50'
14
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
15
+ model_name = "allenai/tulu-2-dpo-13b"
16
+ tokenizer = AutoTokenizer.from_pretrained("allenai/tulu-2-dpo-13b")
17
  model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.bfloat16, use_flash_attention_2=True, device_map="auto")
18
 
19
+ class TuluChatBot:
20
  def __init__(self, model, tokenizer, system_message="You are 🌷Tulu, an AI language model created by Tonic-AI. You are a cautious assistant. You carefully follow instructions. You are helpful and harmless and you follow ethical guidelines and promote positive behavior."):
21
  self.model = model
22
  self.tokenizer = tokenizer
 
47
  return response
48
 
49
  def gradio_predict(user_message, system_message, max_new_tokens, temperature, top_p, repetition_penalty):
50
+ Tulu_bot.set_system_message(system_message)
51
+ response = Tulu_bot.predict(user_message, temperature, max_new_tokens, top_p, repetition_penalty)
52
  return response
53
 
54
+ Tulu_bot = TuluChatBot(model, tokenizer)
55
 
56
  iface = gr.Interface(
57
  fn=gradio_predict,