create app
app.py ADDED

import torch
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

# Base PolyLM-1.7b model with a LoRA adapter fine-tuned for Indonesian medical Q&A.
model = AutoModelForCausalLM.from_pretrained("DAMO-NLP-MT/polylm-1.7b")
model = PeftModel.from_pretrained(model, "fadliaulawi/polylm-1.7b-finetuned")
tokenizer = AutoTokenizer.from_pretrained("DAMO-NLP-MT/polylm-1.7b", padding_side="left", use_fast=False)
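
# Optional inference tweak (an assumption, not strictly required): eval()
# disables dropout so outputs vary only through sampling.
model.eval()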

def generate_prompt(instruction, input, label):
    # Earlier drafts used the English Alpaca-LoRA template and the Llama
    # "<s>[INST] <<SYS>> ... [/INST]" chat format; this version keeps only the
    # Indonesian Alpaca-style template the adapter was fine-tuned on.
    template = {
        "description": "Template used by Alpaca-LoRA.",
        "prompt_input": "Di bawah ini adalah instruksi yang menjelaskan tugas, dipasangkan dengan masukan yang memberikan konteks lebih lanjut. Tulis tanggapan yang melengkapi permintaan dengan tepat.\n\n### Instruksi:\n{instruction}\n\n### Masukan:\n{input}",
        "prompt_no_input": "Di bawah ini adalah instruksi yang menjelaskan tugas. Tulis tanggapan yang melengkapi permintaan dengan tepat.\n\n### Instruksi:\n{instruction}",
        "response_split": "### Tanggapan:"
    }

    if input:
        res = template["prompt_input"].format(instruction=instruction, input=input)
    else:
        # Without this branch, res would be unbound for an empty message.
        res = template["prompt_no_input"].format(instruction=instruction)

    res = f"{res} \n\n### Tanggapan:\n"
    if label:
        res = f"{res}{label}"

    return res
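
# For example, generate_prompt(instr, "Saya demam.", "") returns the Indonesian
# template filled in and ending with "### Tanggapan:\n", so the model is cued
# to write its answer immediately after that marker.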

def user(message, history):
    # Clear the textbox and append the new message with a placeholder reply.
    return "", history + [[message, None]]
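
# Gradio's Chatbot history is a list of [user_message, bot_message] pairs;
# bot() below fills in the None placeholder of the newest pair.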

def generate_and_tokenize_prompt(data_point):
    full_prompt = generate_prompt(
        data_point["instruction"],
        data_point["input"],
        data_point["output"],
    )
    cutoff_len = 256
    tokenizer.pad_token = tokenizer.eos_token
    result = tokenizer(
        full_prompt,
        truncation=True,
        max_length=cutoff_len,
        padding=True,
        return_tensors=None,
    )

    # Carried over from the fine-tuning pipeline: append EOS when truncation
    # left room and the sequence does not already end with it.
    if result["input_ids"][-1] != tokenizer.eos_token_id and len(result["input_ids"]) < cutoff_len:
        result["input_ids"].append(tokenizer.eos_token_id)
        result["attention_mask"].append(1)

    return result
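
# With return_tensors=None the tokenizer returns plain Python lists rather than
# tensors, which is why bot() wraps input_ids in torch.LongTensor below.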

def bot(history, temperature, max_new_tokens, top_p, top_k):
    # Wrap the newest user message in the medical-assistant instruction:
    # "If you are a doctor, please answer the medical question based on the
    # patient's description."
    user_message = history[-1][0]
    data = {
        'instruction': "Jika Anda seorang dokter, silakan menjawab pertanyaan medis berdasarkan deskripsi pasien.",
        'input': user_message,
        'output': ''
    }

    new_user_input_ids = generate_and_tokenize_prompt(data)

    # Batch of one: generate() expects a 2-D tensor of token ids.
    bot_input_ids = torch.LongTensor([new_user_input_ids['input_ids']])

    # Generate a response.
    response = model.generate(
        input_ids=bot_input_ids,
        pad_token_id=tokenizer.eos_token_id,
        temperature=float(temperature),
        max_new_tokens=int(max_new_tokens),
        top_p=float(top_p),
        top_k=int(top_k),
        do_sample=True
    )

    # Decode, then split on "###"; with the prompt_input template the pieces are
    # [preamble, "Instruksi: ...", "Masukan: ...", "Tanggapan: <answer>"].
    response = tokenizer.batch_decode(response, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
    sections = response.split("###")

    # Take the section that contains "Tanggapan" (the generated answer).
    response = sections[3]

    history[-1][1] = response.split("Tanggapan:")[1].strip()
    return history
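
# A more defensive way to pull out the answer (a sketch; extract_response is a
# hypothetical helper and is not wired into bot() above):
def extract_response(decoded):
    marker = "### Tanggapan:"
    # Fall back to the raw text if the marker is missing, e.g. truncated output.
    return decoded.split(marker, 1)[1].strip() if marker in decoded else decoded.strip()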

with gr.Blocks() as demo:
    # Sampling controls. Minimums start above zero: do_sample=True rejects
    # temperature 0, and generate() needs at least one new token.
    temperature = gr.Slider(0.1, 5, value=0.8, step=0.1, label='Temperature')
    max_length = gr.Slider(1, 8192, value=256, step=1, label='Max Length')
    top_p = gr.Slider(0, 1, value=0.8, step=0.1, label='Top P')
    top_k = gr.Slider(0, 50, value=50, step=1, label='Top K')

    chatbot = gr.Chatbot()
    msg = gr.Textbox()
    submit = gr.Button("Submit")
    clear = gr.Button("Clear")

    # Example prompts (Indonesian): "Hello doctor", "Doctor, I have the flu,
    # what should I do?"
    examples = gr.Examples(examples=["Halo dokter", "Dokter aku flu, aku harus apa?"], inputs=[msg])

    # user() first echoes the message into the chat, then bot() fills in the reply.
    submit.click(user, [msg, chatbot], [msg, chatbot], queue=False).then(
        bot, [chatbot, temperature, max_length, top_p, top_k], chatbot
    )
    clear.click(lambda: None, None, chatbot, queue=False)

demo.launch()
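
# Typical local run (an assumption; the Space resolves its own dependencies):
#   pip install torch transformers peft gradio
#   python app.py
# Pass share=True to demo.launch() for a temporary public link.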