Update app.py
Browse files
app.py
CHANGED
@@ -7,19 +7,19 @@ import gradio as gr
|
|
7 |
import sentencepiece
|
8 |
from tokenization_xgen import XgenTokenizer
|
9 |
|
10 |
-
title = "Welcome to 🙋🏻♂️Tonic's
|
11 |
-
description = "Interestingly there simply wasnt a public demo for
|
12 |
|
13 |
os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:50'
|
14 |
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
|
15 |
-
model_name = "
|
16 |
-
tokenizer =
|
17 |
model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto")
|
18 |
model = model.to(dtype=torch.bfloat16)
|
19 |
model = model.to(device)
|
20 |
|
21 |
class XgenChatBot:
|
22 |
-
def __init__(self, model, tokenizer, system_message="You are
|
23 |
self.model = model
|
24 |
self.tokenizer = tokenizer
|
25 |
self.system_message = system_message
|
@@ -28,7 +28,7 @@ class XgenChatBot:
|
|
28 |
self.system_message = new_system_message
|
29 |
|
30 |
def format_prompt(self, user_message):
|
31 |
-
prompt = f"<|
|
32 |
return prompt
|
33 |
|
34 |
def predict(self, user_message, temperature=0.4, max_new_tokens=70, top_p=0.99, repetition_penalty=1.9):
|
|
|
7 |
import sentencepiece
|
8 |
from tokenization_xgen import XgenTokenizer
|
9 |
|
10 |
+
# UI copy for the Gradio app.
# Fix: the original title advertised "Xgen-8K Chat" while the code below loads
# allenai/tulu-2-dpo-70b — align the user-facing title with the actual model,
# and fix the "wasnt" typo in the description.
title = "Welcome to 🙋🏻♂️Tonic's🌷Tulu Chat!"
description = "Interestingly there simply wasn't a public demo for Tulu, So I made one. You can use [allenai/tulu-2-dpo-70b](https://huggingface.co/allenai/tulu-2-dpo-70b) via API using Gradio by scrolling down and clicking Use 'Via API' or privately by [cloning this space on huggingface](https://huggingface.co/spaces/Tonic1/TuluDemo?duplicate=true) . [Join my active builders' server on discord](https://discord.gg/VqTxc76K3u). Let's build together!."
|
12 |
|
13 |
# Cap the CUDA caching-allocator block size to limit fragmentation on
# memory-constrained GPUs.
os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:50'

# Prefer the first CUDA device when one is available; otherwise fall back
# to the CPU.
_cuda_ok = torch.cuda.is_available()
device = torch.device("cuda:0" if _cuda_ok else "cpu")
|
15 |
+
model_name = "allenai/tulu-2-dpo-70b"
# Reuse model_name (instead of repeating the repo string) so the tokenizer and
# model can never drift apart.
tokenizer = AutoTokenizer.from_pretrained(model_name)
# Load directly in bfloat16 and let device_map="auto" (accelerate) place the
# weights. The original called .to(dtype) and .to(device) afterwards, which is
# redundant when the model fits on one GPU and raises a "model has been
# dispatched" error when accelerate has sharded/offloaded the layers.
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    device_map="auto",
    torch_dtype=torch.bfloat16,
)
|
20 |
|
21 |
class XgenChatBot:
|
22 |
+
def __init__(self, model, tokenizer, system_message="You are 🌷Tulu, an AI language model created by Tonic-AI. You are a cautious assistant. You carefully follow instructions. You are helpful and harmless and you follow ethical guidelines and promote positive behavior."):
    """Store the model, tokenizer, and steering system message on the bot."""
    # Keep references, not copies — the caller owns the heavyweight objects.
    self.tokenizer = tokenizer
    self.model = model
    self.system_message = system_message
|
|
|
28 |
self.system_message = new_system_message
|
29 |
|
30 |
def format_prompt(self, user_message):
    """Build a Tulu-2 style prompt: system turn, user turn, then an open assistant turn.

    Fix: the original template emitted the system message under an
    ``<|assistant|>`` tag, so the model read its own instructions as a prior
    response. Tulu 2 is trained on ``<|user|>\\n...\\n<|assistant|>\\n`` turns;
    the system text belongs in a leading ``<|system|>`` turn, and the prompt
    must end with an open ``<|assistant|>\\n`` for generation to continue from.
    """
    prompt = f"<|system|>\n{self.system_message}\n<|user|>\n{user_message}\n<|assistant|>\n"
    return prompt
|
33 |
|
34 |
def predict(self, user_message, temperature=0.4, max_new_tokens=70, top_p=0.99, repetition_penalty=1.9):
|