Tonic committed on
Commit
aadc8fb
1 Parent(s): 59ed1e7

Create app.py

Files changed (1)
  1. app.py +81 -0
app.py ADDED
@@ -0,0 +1,81 @@
+ import spaces
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+ import torch
+ import os
+ import gradio as gr
+ import sentencepiece  # required by the Yi tokenizer
+
+
+ os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:120'  # reduce CUDA memory fragmentation
+ model_id = "01-ai/Yi-9B"
+ tokenizer_path = "./"
+ # eos_token_id = 7
+
+ DESCRIPTION = """
+ # 👋🏻Welcome to 🙋🏻‍♂️Tonic's 🧑🏻‍🚀YI-9B-Base!🚀
+ You can use this Space to test out the current model [01-ai/Yi-9B](https://huggingface.co/01-ai/Yi-9B).
+ You can also use 🧑🏻‍🚀YI-9B-Base🚀 by cloning this Space. 🧬🔬🔍 Simply click here: <a style="display:inline-block" href="https://huggingface.co/spaces/Tonic1/Yi-9B?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a>
+ Join us: 🌟TeamTonic🌟 is always making cool demos! Join our active builders'🛠️ community on 👻Discord: [Discord](https://discord.gg/nXx5wbX9) On 🤗Huggingface: [TeamTonic](https://huggingface.co/TeamTonic) & [MultiTransformer](https://huggingface.co/MultiTransformer) On 🌐Github: [Polytonic](https://github.com/tonic-ai) & contribute to 🌟 [PolyGPT](https://github.com/tonic-ai/polygpt-alpha)
+ """
+
+ tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
+ # tokenizer = YiTokenizer.from_pretrained(tokenizer_path)
+ model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)
+ # tokenizer.eos_token_id = eos_token_id
+ # model.config.eos_token_id = eos_token_id
+
+ def format_prompt(user_message, system_message="You are YiTonic, an AI language model created by Tonic-AI. You are a cautious assistant. You carefully follow instructions. You are helpful and harmless and follow ethical guidelines and promote positive behavior."):
+     prompt = f"<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{user_message}<|im_end|>\n<|im_start|>assistant\n"  # ChatML-style: system turn, user turn, then an open assistant turn
+     return prompt
+
+ @spaces.GPU
+ def predict(message, system_message, max_new_tokens=600, temperature=1.2, top_p=0.9, top_k=40, do_sample=True):
+     formatted_prompt = format_prompt(message, system_message)
+
+     input_ids = tokenizer.encode(formatted_prompt, return_tensors='pt')
+     input_ids = input_ids.to(model.device)
+
+     response_ids = model.generate(
+         input_ids,
+         max_new_tokens=max_new_tokens,
+         temperature=temperature,
+         top_p=top_p,
+         top_k=top_k,
+         no_repeat_ngram_size=9,
+         pad_token_id=tokenizer.eos_token_id,
+         do_sample=do_sample
+     )
+
+     response = tokenizer.decode(response_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True)  # decode only the newly generated tokens
+     truncate_str = "<|im_end|>"
+     if truncate_str in response:
+         response = response.split(truncate_str)[0]
+
+     return [(message, response)]  # gr.Chatbot expects (user, bot) message pairs
+
+ with gr.Blocks() as demo:
+     gr.Markdown(DESCRIPTION)
+     with gr.Group():
+         textbox = gr.Textbox(placeholder='Your Message Here', label='Your Message', lines=2)
+         system_prompt = gr.Textbox(placeholder='Provide a System Prompt In The First Person', label='System Prompt', lines=2, value="You are YiTonic, an AI language model created by Tonic-AI. You are a cautious assistant. You carefully follow instructions. You are helpful and harmless and you follow ethical guidelines and promote positive behavior.")
+
+     with gr.Group():
+         chatbot = gr.Chatbot(label='TonicYi-9B-Base-🧠🤯')
+
+     with gr.Group():
+         submit_button = gr.Button('Submit', variant='primary')
+
+     with gr.Accordion(label='Advanced options', open=False):
+         max_new_tokens = gr.Slider(label='Max New Tokens', minimum=1, maximum=4096, step=1, value=1024)
+         temperature = gr.Slider(label='Temperature', minimum=0.1, maximum=4.0, step=0.1, value=1.2)
+         top_p = gr.Slider(label='Top-P (nucleus sampling)', minimum=0.05, maximum=1.0, step=0.05, value=0.9)
+         top_k = gr.Slider(label='Top-K', minimum=1, maximum=1000, step=1, value=40)
+         do_sample_checkbox = gr.Checkbox(label='Use sampling (uncheck for greedy decoding)', value=True)
+
+     submit_button.click(
+         fn=predict,
+         inputs=[textbox, system_prompt, max_new_tokens, temperature, top_p, top_k, do_sample_checkbox],
+         outputs=chatbot
+     )
+
+ demo.launch()
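
For reference, a minimal sketch of exercising the same model and the ChatML-style prompt used above outside of the Gradio app (not part of this commit; it assumes a CUDA GPU with enough memory for the bf16 weights, and the example prompt text is hypothetical):

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

model_id = "01-ai/Yi-9B"
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True
)

# Same ChatML-style turn structure as format_prompt() in app.py (an assumption for this base model).
prompt = (
    "<|im_start|>system\nYou are YiTonic, a helpful assistant.<|im_end|>\n"
    "<|im_start|>user\nWrite a haiku about rockets.<|im_end|>\n"
    "<|im_start|>assistant\n"
)
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output_ids = model.generate(**inputs, max_new_tokens=64, do_sample=True, temperature=0.7, top_p=0.9)
# Keep only the newly generated text and truncate at the end-of-turn marker.
new_text = tokenizer.decode(output_ids[0, inputs["input_ids"].shape[-1]:], skip_special_tokens=True)
print(new_text.split("<|im_end|>")[0])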