Fix input msgs being changed after calling chat
Browse files- modeling_minicpmv.py +3 -2
modeling_minicpmv.py
CHANGED
@@ -301,6 +301,7 @@ class MiniCPMV(MiniCPMVPreTrainedModel):
301         vision_hidden_states=None,
302         max_new_tokens=1024,
303         sampling=True,
304         **kwargs
305     ):
306         if isinstance(msgs, str):

@@ -353,7 +354,7 @@ class MiniCPMV(MiniCPMVPreTrainedModel):
353         with torch.inference_mode():
354             res, vision_hidden_states = self.generate(
355                 data_list=[final_input],
356 -               max_inp_length=
357                 img_list=[images],
358                 tokenizer=tokenizer,
359                 max_new_tokens=max_new_tokens,

@@ -362,7 +363,7 @@ class MiniCPMV(MiniCPMVPreTrainedModel):
362             **generation_config
363         )
364         answer = res[0]
365 -       context = msgs
366         context.append({"role": "assistant", "content": answer})
367
368         return answer, context, generation_config
|
|
301         vision_hidden_states=None,
302         max_new_tokens=1024,
303         sampling=True,
304 +       max_inp_length=2048,
305         **kwargs
306     ):
307         if isinstance(msgs, str):

354         with torch.inference_mode():
355             res, vision_hidden_states = self.generate(
356                 data_list=[final_input],
357 +               max_inp_length=max_inp_length,
358                 img_list=[images],
359                 tokenizer=tokenizer,
360                 max_new_tokens=max_new_tokens,

363             **generation_config
364         )
365         answer = res[0]
366 +       context = msgs.copy()
367         context.append({"role": "assistant", "content": answer})
368
369         return answer, context, generation_config