Update app.py
app.py
CHANGED
@@ -19,274 +19,41 @@ from transformers import AutoModel, AutoTokenizer
 # PYTORCH_ENABLE_MPS_FALLBACK=1 python web_demo_2.5.py --device mps
 
 # Argparser
-
-
-
-
-assert device in ['cuda', 'mps']
-
-# Load model
-model_path = 'y0un92/test'
+# test.py
+import torch
+from PIL import Image
+from transformers import AutoModel, AutoTokenizer
 
-
-
-model = AutoModel.from_pretrained("y0un92/test", trust_remote_code=True)
-tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
+model = AutoModel.from_pretrained('openbmb/MiniCPM-Llama3-V-2_5-int4', trust_remote_code=True)
+tokenizer = AutoTokenizer.from_pretrained('openbmb/MiniCPM-Llama3-V-2_5-int4', trust_remote_code=True)
 model.eval()
 
-
-
-
-
-
-
-
-
-
-
-'
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-repetition_penalty_slider2 = {
-    'minimum': 0,
-    'maximum': 3,
-    'value': 1.05,
-    'step': 0.01,
-    'interactive': True,
-    'label': 'Repetition Penalty'
-}
-max_new_tokens_slider = {
-    'minimum': 1,
-    'maximum': 4096,
-    'value': 1024,
-    'step': 1,
-    'interactive': True,
-    'label': 'Max New Tokens'
-}
-
-top_p_slider = {
-    'minimum': 0,
-    'maximum': 1,
-    'value': 0.8,
-    'step': 0.05,
-    'interactive': True,
-    'label': 'Top P'
-}
-top_k_slider = {
-    'minimum': 0,
-    'maximum': 200,
-    'value': 100,
-    'step': 1,
-    'interactive': True,
-    'label': 'Top K'
-}
-temperature_slider = {
-    'minimum': 0,
-    'maximum': 2,
-    'value': 0.7,
-    'step': 0.05,
-    'interactive': True,
-    'label': 'Temperature'
-}
-
-
-def create_component(params, comp='Slider'):
-    if comp == 'Slider':
-        return gr.Slider(
-            minimum=params['minimum'],
-            maximum=params['maximum'],
-            value=params['value'],
-            step=params['step'],
-            interactive=params['interactive'],
-            label=params['label']
-        )
-    elif comp == 'Radio':
-        return gr.Radio(
-            choices=params['choices'],
-            value=params['value'],
-            interactive=params['interactive'],
-            label=params['label']
-        )
-    elif comp == 'Button':
-        return gr.Button(
-            value=params['value'],
-            interactive=True
-        )
-
-@spaces.GPU(duration=120)
-def chat(img, msgs, ctx, params=None, vision_hidden_states=None):
-    default_params = {"stream": False, "sampling": False, "num_beams": 3, "repetition_penalty": 1.2, "max_new_tokens": 1024}
-    if params is None:
-        params = default_params
-    if img is None:
-        yield "Error, invalid image, please upload a new image"
-    else:
-        try:
-            image = img.convert('RGB')
-            answer = model.chat(
-                image=image,
-                msgs=msgs,
-                tokenizer=tokenizer,
-                **params
-            )
-            # if params['stream'] is False:
-            #     res = re.sub(r'(<box>.*</box>)', '', answer)
-            #     res = res.replace('<ref>', '')
-            #     res = res.replace('</ref>', '')
-            #     res = res.replace('<box>', '')
-            #     answer = res.replace('</box>', '')
-            # else:
-            for char in answer:
-                yield char
-        except Exception as err:
-            print(err)
-            traceback.print_exc()
-            yield ERROR_MSG
-
-
-def upload_img(image, _chatbot, _app_session):
-    image = Image.fromarray(image)
-
-    _app_session['sts'] = None
-    _app_session['ctx'] = []
-    _app_session['img'] = image
-    _chatbot.append(('', 'Image uploaded successfully, you can talk to me now'))
-    return _chatbot, _app_session
-
-
-def respond(_chat_bot, _app_cfg, params_form, num_beams, repetition_penalty, repetition_penalty_2, top_p, top_k, temperature):
-    _question = _chat_bot[-1][0]
-    print('<Question>:', _question)
-    if _app_cfg.get('ctx', None) is None:
-        _chat_bot[-1][1] = 'Please upload an image to start'
-        yield (_chat_bot, _app_cfg)
-    else:
-        _context = _app_cfg['ctx'].copy()
-        if _context:
-            _context.append({"role": "user", "content": _question})
-        else:
-            _context = [{"role": "user", "content": _question}]
-        if params_form == 'Beam Search':
-            params = {
-                'sampling': False,
-                'stream': False,
-                'num_beams': num_beams,
-                'repetition_penalty': repetition_penalty,
-                "max_new_tokens": 896
-            }
-        else:
-            params = {
-                'sampling': True,
-                'stream': True,
-                'top_p': top_p,
-                'top_k': top_k,
-                'temperature': temperature,
-                'repetition_penalty': repetition_penalty_2,
-                "max_new_tokens": 896
-            }
-
-        gen = chat(_app_cfg['img'], _context, None, params)
-        _chat_bot[-1][1] = ""
-        for _char in gen:
-            _chat_bot[-1][1] += _char
-            _context[-1]["content"] += _char
-            yield (_chat_bot, _app_cfg)
-
-
-def request(_question, _chat_bot, _app_cfg):
-    _chat_bot.append((_question, None))
-    return '', _chat_bot, _app_cfg
-
-
-def regenerate_button_clicked(_question, _chat_bot, _app_cfg):
-    if len(_chat_bot) <= 1:
-        _chat_bot.append(('Regenerate', 'No question for regeneration.'))
-        return '', _chat_bot, _app_cfg
-    elif _chat_bot[-1][0] == 'Regenerate':
-        return '', _chat_bot, _app_cfg
-    else:
-        _question = _chat_bot[-1][0]
-        _chat_bot = _chat_bot[:-1]
-        _app_cfg['ctx'] = _app_cfg['ctx'][:-2]
-        return request(_question, _chat_bot, _app_cfg)
-        # return respond(_chat_bot, _app_cfg, params_form, num_beams, repetition_penalty, repetition_penalty_2, top_p, top_k, temperature)
-
-
-def clear_button_clicked(_question, _chat_bot, _app_cfg, _bt_pic):
-    _chat_bot.clear()
-    _app_cfg['sts'] = None
-    _app_cfg['ctx'] = None
-    _app_cfg['img'] = None
-    _bt_pic = None
-    return '', _chat_bot, _app_cfg, _bt_pic
-
-
-with gr.Blocks() as demo:
-    with gr.Row():
-        with gr.Column(scale=1, min_width=300):
-            params_form = create_component(form_radio, comp='Radio')
-            with gr.Accordion("Beam Search") as beams_according:
-                num_beams = create_component(num_beams_slider)
-                repetition_penalty = create_component(repetition_penalty_slider)
-            with gr.Accordion("Sampling") as sampling_according:
-                top_p = create_component(top_p_slider)
-                top_k = create_component(top_k_slider)
-                temperature = create_component(temperature_slider)
-                repetition_penalty_2 = create_component(repetition_penalty_slider2)
-            regenerate = create_component({'value': 'Regenerate'}, comp='Button')
-            clear = create_component({'value': 'Clear'}, comp='Button')
-        with gr.Column(scale=3, min_width=500):
-            app_session = gr.State({'sts': None, 'ctx': None, 'img': None})
-            bt_pic = gr.Image(label="Upload an image to start")
-            chat_bot = gr.Chatbot(label=f"Chat with {model_name}")
-            txt_message = gr.Textbox(label="Input text")
-
-            clear.click(
-                clear_button_clicked,
-                [txt_message, chat_bot, app_session, bt_pic],
-                [txt_message, chat_bot, app_session, bt_pic],
-                queue=False
-            )
-            txt_message.submit(
-                request,
-                # [txt_message, chat_bot, app_session, params_form, num_beams, repetition_penalty, repetition_penalty_2, top_p, top_k, temperature],
-                [txt_message, chat_bot, app_session],
-                [txt_message, chat_bot, app_session],
-                queue=False
-            ).then(
-                respond,
-                [chat_bot, app_session, params_form, num_beams, repetition_penalty, repetition_penalty_2, top_p, top_k, temperature],
-                [chat_bot, app_session]
-            )
-            regenerate.click(
-                regenerate_button_clicked,
-                [txt_message, chat_bot, app_session],
-                [txt_message, chat_bot, app_session],
-                queue=False
-            ).then(
-                respond,
-                [chat_bot, app_session, params_form, num_beams, repetition_penalty, repetition_penalty_2, top_p, top_k, temperature],
-                [chat_bot, app_session]
-            )
-            bt_pic.upload(lambda: None, None, chat_bot, queue=False).then(upload_img, inputs=[bt_pic, chat_bot, app_session], outputs=[chat_bot, app_session])
-
-# launch
-# demo.launch(share=False, debug=True, show_api=False, server_port=8080, server_name="0.0.0.0")
-demo.queue()
-demo.launch()
+image = Image.open('xx.jpg').convert('RGB')
+question = 'What is in the image?'
+msgs = [{'role': 'user', 'content': question}]
+
+res = model.chat(
+    image=image,
+    msgs=msgs,
+    tokenizer=tokenizer,
+    sampling=True,  # if sampling=False, beam_search will be used by default
+    temperature=0.7,
+    # system_prompt=''  # pass system_prompt if needed
+)
+print(res)
+
+## if you want to use streaming, please make sure sampling=True and stream=True
+## the model.chat will return a generator
+res = model.chat(
+    image=image,
+    msgs=msgs,
+    tokenizer=tokenizer,
+    sampling=True,
+    temperature=0.7,
+    stream=True
+)
+
+generated_text = ""
+for new_text in res:
+    generated_text += new_text
+    print(new_text, flush=True, end='')
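
The rewritten script is single-turn; model.chat takes the whole dialogue in msgs, so a follow-up round just extends that list. A minimal multi-turn sketch, assuming it runs after the streaming loop above (so generated_text holds the first answer); the follow-up question is an illustrative placeholder:

# Multi-turn sketch: feed the previous answer back as an assistant message,
# then ask a follow-up. Continues from model/tokenizer/image/msgs above.
msgs.append({'role': 'assistant', 'content': generated_text})
msgs.append({'role': 'user', 'content': 'Describe it in more detail.'})  # illustrative follow-up

follow_up = model.chat(
    image=image,
    msgs=msgs,
    tokenizer=tokenizer,
    sampling=True,
    temperature=0.7
)
print(follow_up)

Appending each answer before the next question is what keeps the model aware of earlier rounds; nothing else in the call changes.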