Fix inference code
modeling_minicpmv.py (+4, -4)
@@ -352,6 +352,8 @@ class MiniCPMV(MiniCPMVPreTrainedModel):
         if image is not None and isinstance(copy_msgs[0]['content'], str):
             copy_msgs[0]['content'] = [image, copy_msgs[0]['content']]

+        images = []
+        tgt_sizes = []
         for i, msg in enumerate(copy_msgs):
             role = msg["role"]
             content = msg["content"]
@@ -361,8 +363,6 @@ class MiniCPMV(MiniCPMVPreTrainedModel):
             if isinstance(content, str):
                 content = [content]

-            images = []
-            tgt_sizes = []
             cur_msgs = []
             for c in content:
                 if isinstance(c, Image.Image):
@@ -387,10 +387,10 @@ class MiniCPMV(MiniCPMVPreTrainedModel):
                 elif isinstance(c, str):
                     cur_msgs.append(c)

-            if tgt_sizes:
-                tgt_sizes = torch.vstack(tgt_sizes)

             msg['content'] = '\n'.join(cur_msgs)
+        if tgt_sizes:
+            tgt_sizes = torch.vstack(tgt_sizes)

         input_ids = tokenizer.apply_chat_template(copy_msgs, tokenize=True, add_generation_prompt=False)
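The change hoists the `images` and `tgt_sizes` accumulators out of the per-message loop and stacks `tgt_sizes` once after the loop. Previously both lists were re-initialised on every iteration, so images attached to earlier turns of a multi-turn conversation were discarded and only the last message's images were passed on to the model. The sketch below illustrates the corrected control flow only; the message structure, the `collect_images` name, and the `(<image>./</image>)` placeholder are simplified stand-ins for the model's real preprocessing, not its actual API.

```python
import torch

def collect_images(msgs):
    # Accumulators are created once, before iterating over the conversation.
    # In the buggy version they were re-created inside the loop, which
    # silently dropped images from all but the final message.
    images = []
    tgt_sizes = []
    for msg in msgs:
        cur_msgs = []
        for c in msg["content"]:
            # Stand-in for the isinstance(c, Image.Image) branch.
            if isinstance(c, dict) and c.get("type") == "image":
                images.append(c["pixels"])
                tgt_sizes.append(torch.tensor([c["h"], c["w"]]))
                cur_msgs.append("(<image>./</image>)")  # hypothetical placeholder text
            elif isinstance(c, str):
                cur_msgs.append(c)
        msg["content"] = "\n".join(cur_msgs)
    # Stack once, after every message has been processed.
    if tgt_sizes:
        tgt_sizes = torch.vstack(tgt_sizes)
    return images, tgt_sizes
```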