finalf0 committed on
Commit
3b6aeff
1 Parent(s): 52e5139

Add processor and image processor (#61)


- add processor & image processor (b07c810aca591e9b636e075ffcb798d15c0380c8)
- restore resampler to 0830407 (0a74acd02098542d635d2d1944e31e4072fbef11)
- restore image_bound; restore model_max_length (9403e15c252d7ad9abeb8e967dece676f3f2e902)
- update chat msgs (b352d20568e158b9ee769c133019643e75c6d446)
- multi-images (6d7ce173874bfa672e6015792b3acebed34c54b6)
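
A minimal usage sketch of the new processor path (an editor's illustration, not part of this commit; the repository id and file layout are assumptions based on the files below):

    import torch
    from PIL import Image
    from transformers import AutoModel, AutoProcessor, AutoTokenizer

    model_id = "openbmb/MiniCPM-Llama3-V-2_5"  # assumed repo id
    model = AutoModel.from_pretrained(model_id, trust_remote_code=True, torch_dtype=torch.float16).eval()
    tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
    processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)

    image = Image.open("example.jpg").convert("RGB")
    msgs = [{"role": "user", "content": "Describe this image."}]
    # After this commit, chat() builds the prompt with
    # processor.tokenizer.apply_chat_template and tensorizes it via the processor.
    answer = model.chat(image=image, msgs=msgs, tokenizer=tokenizer, processor=processor)
    print(answer)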

configuration.json ADDED
@@ -0,0 +1 @@
+ {"framework":"Pytorch","task":"multimodal-dialogue"}
image_processing_minicpmv.py ADDED
@@ -0,0 +1,402 @@
+ from typing import Optional, Union, Dict, Any
+
+ import torch
+ import math
+ import PIL.Image
+ import PIL.ImageSequence
+ import numpy as np
+ import PIL
+ from PIL import Image
+
+ from transformers.utils import TensorType, requires_backends, is_torch_dtype, is_torch_device
+ from transformers.image_processing_utils import BaseImageProcessor, BatchFeature
+ from transformers import AutoImageProcessor
+ from transformers.image_transforms import to_channel_dimension_format
+ from transformers.image_utils import (
+     ImageInput,
+     make_list_of_images,
+     valid_images,
+     is_torch_tensor,
+     to_numpy_array,
+     infer_channel_dimension_format,
+     ChannelDimension
+ )
+
+
+ def recursive_converter(converter, value):
+     if isinstance(value, list):
+         new_value = []
+         for v in value:
+             new_value += [recursive_converter(converter, v)]
+         return new_value
+     else:
+         return converter(value)
+
+
+ class MiniCPMVBatchFeature(BatchFeature):
+     r"""
+     Extends BatchFeature to support images of varying sizes
+     """
+     def __init__(self, data: Optional[Dict[str, Any]] = None, tensor_type: Union[None, str, TensorType] = None):
+         super().__init__(data)
+         self.convert_to_tensors(tensor_type=tensor_type)
+
+     def convert_to_tensors(self, tensor_type: Optional[Union[str, TensorType]] = None):
+         if tensor_type is None:
+             return self
+
+         is_tensor, as_tensor = self._get_is_as_tensor_fns(tensor_type)
+
+         def converter(value):
+             try:
+                 if not is_tensor(value):
+                     tensor = as_tensor(value)
+                     return tensor
+             except:  # noqa E722
+                 if key == "overflowing_values":
+                     raise ValueError("Unable to create tensor returning overflowing values of different lengths. ")
+                 raise ValueError(
+                     "Unable to create tensor, you should probably activate padding "
+                     "with 'padding=True' to have batched tensors with the same length."
+                 )
+
+
+         for key, value in self.items():
+             self[key] = recursive_converter(converter, value)
+         return self
+
+     def to(self, *args, **kwargs) -> "MiniCPMVBatchFeature":
+         requires_backends(self, ["torch"])
+         import torch
+
+         def cast_tensor(v):
+             # check if v is a floating point
+             if torch.is_floating_point(v):
+                 # cast and send to device
+                 return v.to(*args, **kwargs)
+             elif device is not None:
+                 return v.to(device=device)
+             else:
+                 return v
+
+         new_data = {}
+         device = kwargs.get("device")
+         # Check if the args are a device or a dtype
+         if device is None and len(args) > 0:
+             # device should be always the first argument
+             arg = args[0]
+             if is_torch_dtype(arg):
+                 # The first argument is a dtype
+                 pass
+             elif isinstance(arg, str) or is_torch_device(arg) or isinstance(arg, int):
+                 device = arg
+             else:
+                 # it's something else
+                 raise ValueError(f"Attempting to cast a BatchFeature to type {str(arg)}. This is not supported.")
+         # We cast only floating point tensors to avoid issues with tokenizers casting `LongTensor` to `FloatTensor`
+         for k, v in self.items():
+             new_data[k] = recursive_converter(cast_tensor, v)
+         self.data = new_data
+         return self
+
+
+ class MiniCPMVImageProcessor(BaseImageProcessor):
+     model_input_names = ["pixel_values"]
+
+     def __init__(
+             self,
+             max_slice_nums=9,
+             scale_resolution=448,
+             patch_size=14,
+             **kwargs):
+         super().__init__(**kwargs)
+         self.max_slice_nums = max_slice_nums
+         self.scale_resolution = scale_resolution
+         self.patch_size = patch_size
+         self.image_feature_size = kwargs.pop("image_feature_size", 64)
+         self.im_start_token = kwargs.pop("im_start", "<image>")
+         self.im_end_token = kwargs.pop("im_end", "</image>")
+         self.slice_start_token = kwargs.pop("slice_start", "<slice>")
+         self.slice_end_token = kwargs.pop("slice_end", "</slice>")
+         self.unk_token = kwargs.pop("unk", "<unk>")
+         self.mean = np.array(kwargs.pop("norm_mean", [0.5, 0.5, 0.5]))
+         self.std = np.array(kwargs.pop("norm_std", [0.5, 0.5, 0.5]))
+         self.version = kwargs.pop("version", 2.0)
+
+     def ensure_divide(self, length, patch_size):
+         return max(round(length / patch_size) * patch_size, patch_size)
+
+     def find_best_resize(self,
+                          original_size,
+                          scale_resolution,
+                          patch_size,
+                          allow_upscale=False):
+         width, height = original_size
+         if (width * height >
+                 scale_resolution * scale_resolution) or allow_upscale:
+             r = width / height
+             height = int(scale_resolution / math.sqrt(r))
+             width = int(height * r)
+         best_width = self.ensure_divide(width, patch_size)
+         best_height = self.ensure_divide(height, patch_size)
+         return (best_width, best_height)
+
+     def get_refine_size(self,
+                         original_size,
+                         grid,
+                         scale_resolution,
+                         patch_size,
+                         allow_upscale=False):
+         width, height = original_size
+         grid_x, grid_y = grid
+
+         refine_width = self.ensure_divide(width, grid_x)
+         refine_height = self.ensure_divide(height, grid_y)
+
+         grid_width = refine_width / grid_x
+         grid_height = refine_height / grid_y
+
+         best_grid_size = self.find_best_resize((grid_width, grid_height),
+                                                scale_resolution,
+                                                patch_size,
+                                                allow_upscale=allow_upscale)
+         refine_size = (best_grid_size[0] * grid_x, best_grid_size[1] * grid_y)
+         return refine_size
+
+     def split_to_patches(self, image, grid):
+         patches = []
+         width, height = image.size
+         grid_x = int(width / grid[0])
+         grid_y = int(height / grid[1])
+         for i in range(0, height, grid_y):
+             images = []
+             for j in range(0, width, grid_x):
+                 box = (j, i, j + grid_x, i + grid_y)
+                 patch = image.crop(box)
+                 images.append(patch)
+             patches.append(images)
+         return patches
+
+     def slice_image(
+         self, image, max_slice_nums=9, scale_resolution=448, patch_size=14, never_split=False
+     ):
+         original_size = image.size
+         original_width, original_height = original_size
+         log_ratio = math.log(original_width / original_height)
+         ratio = original_width * original_height / (scale_resolution * scale_resolution)
+         multiple = min(math.ceil(ratio), max_slice_nums)
+
+         source_image = None
+         best_grid = None
+         patches = []
+
+         if multiple <= 1 or never_split:
+             # don't need to slice, upsample
+             best_size = self.find_best_resize(
+                 original_size, scale_resolution, patch_size, allow_upscale=True
+             )
+             source_image = image.resize(best_size, resample=Image.Resampling.BICUBIC)
+         else:
+             candidate_split_grids_nums = []
+             for i in [multiple - 1, multiple, multiple + 1]:
+                 if i == 1 or i > max_slice_nums:
+                     continue
+                 candidate_split_grids_nums.append(i)
+
+             # source image: down-sample and ensure it is divisible by patch_size
+             best_resize = self.find_best_resize(original_size, scale_resolution, patch_size)
+             source_image = image.copy().resize(best_resize, resample=Image.Resampling.BICUBIC)
+             candidate_grids = []
+
+             # find best grid
+             for split_grids_nums in candidate_split_grids_nums:
+                 m = 1
+                 while m <= split_grids_nums:
+                     if split_grids_nums % m == 0:
+                         candidate_grids.append([m, split_grids_nums // m])
+                     m += 1
+
+             best_grid = [1, 1]
+             min_error = float("inf")
+             for grid in candidate_grids:
+                 error = abs(log_ratio - math.log(grid[0] / grid[1]))
+                 if error < min_error:
+                     best_grid = grid
+                     min_error = error
+
+             refine_size = self.get_refine_size(
+                 original_size, best_grid, scale_resolution, patch_size, allow_upscale=True
+             )
+
+             refine_image = image.resize(refine_size, resample=Image.Resampling.BICUBIC)
+             patches = self.split_to_patches(refine_image, best_grid)
+
+         return source_image, patches, best_grid
+
+     def get_grid_placeholder(self, grid):
+         if grid is None:
+             return ""
+         image_placeholder = (
+             self.im_start_token
+             + self.unk_token * self.image_feature_size
+             + self.im_end_token
+         )
+
+         cols = grid[0]
+         rows = grid[1]
+         slices = []
+         for i in range(rows):
+             lines = []
+             for j in range(cols):
+                 lines.append(image_placeholder)
+             slices.append("".join(lines))
+
+         slice_placeholder = self.slice_start_token + "\n".join(slices) + self.slice_end_token
+         return slice_placeholder
+
+     def get_sliced_images(self, image):
+         slice_images = []
+
+         source_image, patches, sliced_grid = self.slice_image(
+             image,
+             self.max_slice_nums,    # default: 9
+             self.scale_resolution,  # default: 448
+             self.patch_size         # default: 14
+         )
+         slice_images.append(source_image)
+
+         if len(patches) > 0:
+             for i in range(len(patches)):
+                 for j in range(len(patches[0])):
+                     slice_images.append(patches[i][j])
+         return slice_images
+
+     def get_sliced_grid(self, image_size):
+         original_width, original_height = image_size
+         log_ratio = math.log(original_width / original_height)
+         ratio = original_width * original_height / (self.scale_resolution * self.scale_resolution)
+         multiple = min(math.ceil(ratio), self.max_slice_nums)
+         if multiple <= 1:
+             return None
+         candidate_split_grids_nums = []
+         for i in [multiple - 1, multiple, multiple + 1]:
+             if i == 1 or i > self.max_slice_nums:
+                 continue
+             candidate_split_grids_nums.append(i)
+
+         candidate_grids = []
+         for split_grids_nums in candidate_split_grids_nums:
+             m = 1
+             while m <= split_grids_nums:
+                 if split_grids_nums % m == 0:
+                     candidate_grids.append([m, split_grids_nums // m])
+                 m += 1
+
+         best_grid = [1, 1]
+         min_error = float("inf")
+         for grid in candidate_grids:
+             error = abs(log_ratio - math.log(grid[0] / grid[1]))
+             if error < min_error:
+                 best_grid = grid
+                 min_error = error
+
+         return best_grid
+
+     def get_slice_image_placeholder(self, image_size):
+         grid = self.get_sliced_grid(image_size=image_size)
+         return (
+             self.im_start_token
+             + self.unk_token * self.image_feature_size
+             + self.im_end_token
+         ) + self.get_grid_placeholder(grid=grid)
+
+     def to_pil_image(self, image, rescale=None) -> PIL.Image.Image:
+         """
+         Converts `image` to a PIL Image. Optionally rescales it and puts the channel dimension back as the last axis if
+         needed.
+
+         Args:
+             image (`PIL.Image.Image` or `numpy.ndarray` or `torch.Tensor`):
+                 The image to convert to the PIL Image format.
+             rescale (`bool`, *optional*):
+                 Whether or not to apply the scaling factor (to make pixel values integers between 0 and 255). Will
+                 default to `True` if the image type is a floating type, `False` otherwise.
+         """
+         if isinstance(image, PIL.Image.Image):
+             return image
+         if is_torch_tensor(image):
+             image = image.numpy()
+
+         if isinstance(image, np.ndarray):
+             if rescale is None:
+                 # rescale defaults to True when the array is of floating type.
+                 rescale = isinstance(image.flat[0], np.floating)
+             # If the channel has been moved to the first dim, we put it back at the end.
+             if image.ndim == 3 and image.shape[0] in [1, 3]:
+                 image = image.transpose(1, 2, 0)
+             if rescale:
+                 image = image * 255
+             image = image.astype(np.uint8)
+             return PIL.Image.fromarray(image)
+         return image
+
+     def reshape_by_patch(self, image):
+         """
+         :param image: shape [3, H, W]
+         :param patch_size:
+         :return: [3, patch_size, HW/patch_size]
+         """
+         image = torch.from_numpy(image)
+         patch_size = self.patch_size
+         patches = torch.nn.functional.unfold(
+             image,
+             (patch_size, patch_size),
+             stride=(patch_size, patch_size)
+         )
+
+         patches = patches.reshape(image.size(0), patch_size, patch_size, -1)
+         patches = patches.permute(0, 1, 3, 2).reshape(image.size(0), patch_size, -1)
+         return patches.numpy()
+
+     def preprocess(
+         self,
+         images: ImageInput,
+         do_pad: Optional[bool] = True,  # TODO: add pad for MiniCPM-Llama3-V-2_5
+         return_tensors: Optional[Union[str, TensorType]] = None
+     ) -> MiniCPMVBatchFeature:
+         images = make_list_of_images(images)
+
+         if not valid_images(images):
+             raise ValueError(
+                 "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
+                 "torch.Tensor, tf.Tensor or jax.ndarray."
+             )
+
+         images = [self.to_pil_image(image).convert("RGB") for image in images]
+         input_data_format = infer_channel_dimension_format(np.array(images[0]))
+
+         new_images = []
+         image_sizes = [image.size for image in images]
+         tgt_sizes = []
+         for image in images:
+             image_patches = self.get_sliced_images(image)
+             image_patches = [to_numpy_array(image).astype(np.float32) / 255 for image in image_patches]
+             image_patches = [
+                 self.normalize(image=image, mean=self.mean, std=self.std, input_data_format=input_data_format)
+                 for image in image_patches
+             ]
+             image_patches = [
+                 to_channel_dimension_format(image, ChannelDimension.FIRST, input_channel_dim=input_data_format)
+                 for image in image_patches
+             ]
+             for slice_image in image_patches:
+                 new_images.append(self.reshape_by_patch(slice_image))
+                 tgt_sizes.append(np.array((slice_image.shape[1] // self.patch_size, slice_image.shape[2] // self.patch_size)))
+
+         if tgt_sizes:
+             tgt_sizes = np.vstack(tgt_sizes)
+         return MiniCPMVBatchFeature(
+             data={"pixel_values": new_images, "image_sizes": image_sizes, "tgt_sizes": tgt_sizes}, tensor_type=return_tensors
+         )
+
+ AutoImageProcessor.register("MiniCPMVImageProcessor", MiniCPMVImageProcessor)
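
A quick check of the slicing logic above (an editor's illustration, not part of the commit; the numbers follow from the defaults max_slice_nums=9, scale_resolution=448, patch_size=14):

    from PIL import Image

    ip = MiniCPMVImageProcessor()
    image = Image.new("RGB", (1920, 1080))   # 16:9 test image
    print(ip.get_sliced_grid(image.size))    # [4, 2]: the grid whose log aspect ratio is closest to the image's
    out = ip.preprocess(image)
    print(len(out["pixel_values"]))          # 9: the resized source image plus the 4x2 slices
    print(out["tgt_sizes"][0])               # per-slice (H, W) measured in 14x14 patches, e.g. [24 43]
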
modeling_minicpmv.py CHANGED
@@ -1,14 +1,13 @@
  import math
- from typing import List, Optional
  import json
  import torch
- import torchvision
  from threading import Thread
  from copy import deepcopy
  from PIL import Image
  from torchvision import transforms
- from transformers import LlamaTokenizer, LlamaPreTrainedModel, LlamaForCausalLM, AutoModel, PreTrainedTokenizerFast, TextIteratorStreamer
+ from transformers import LlamaPreTrainedModel, LlamaForCausalLM, TextIteratorStreamer
  from transformers.models.idefics2.modeling_idefics2 import Idefics2VisionTransformer
+ from transformers import AutoProcessor

  from .configuration_minicpm import MiniCPMVConfig
  from .resampler import Resampler
@@ -42,13 +41,13 @@ class MiniCPMV(MiniCPMVPreTrainedModel):

          return model

-     def init_resampler(self, embed_dim, vision_dim,):
+     def init_resampler(self, embed_dim, vision_dim):
          return Resampler(
              num_queries=self.config.query_num,
              embed_dim=embed_dim,
              num_heads=embed_dim // 128,
              kv_dim=vision_dim,
-             adaptive=True,
+             adaptive=True
          )

      def init_transform(self):
@@ -60,13 +59,25 @@ class MiniCPMV(MiniCPMVPreTrainedModel):
                  ),
              ]
          )
-
+
      def get_input_embeddings(self):
          return self.llm.get_input_embeddings()

      def set_input_embeddings(self, value):
          self.llm.embed_tokens = value
-
+
+     def get_output_embeddings(self):
+         return self.llm.lm_head
+
+     def set_output_embeddings(self, new_embeddings):
+         self.llm.lm_head = new_embeddings
+
+     def set_decoder(self, decoder):
+         self.llm = decoder
+
+     def get_decoder(self):
+         return self.llm
+
      def get_vllm_embedding(self, data):
          if 'vision_hidden_states' not in data:
              dtype = self.llm.model.embed_tokens.weight.dtype
@@ -79,7 +90,7 @@ class MiniCPMV(MiniCPMVPreTrainedModel):
              for pixel_values in pixel_values_list:
                  img_cnt.append(len(pixel_values))
                  all_pixel_values.extend([i.flatten(end_dim=1).permute(1, 0) for i in pixel_values])
-
+
              # exist image
              if all_pixel_values:
                  tgt_sizes = torch.vstack(tgt_sizes).type(torch.int32)
@@ -107,7 +118,6 @@ class MiniCPMV(MiniCPMVPreTrainedModel):
                      single_pixel_values = single_pixel_values.permute(0, 2, 1).reshape(B, 3, -1, L)
                      single_vision_embedding = self.vpm(single_pixel_values.type(dtype)).last_hidden_state
                      single_vision_embedding = self.resampler(single_vision_embedding, single_tgt_size.unsqueeze(0))
-
                      vision_embedding.append(single_vision_embedding)
                  vision_embedding = torch.vstack(vision_embedding)

@@ -153,13 +163,14 @@ class MiniCPMV(MiniCPMVPreTrainedModel):
                  image_indices = torch.stack(
                      [torch.arange(r[0], r[1], dtype=torch.long) for r in cur_image_bound]
                  ).to(vllm_embedding.device)
+
                  cur_vllm_emb.scatter_(0, image_indices.view(-1, 1).repeat(1, cur_vllm_emb.shape[-1]),
                                        cur_vs_hs.view(-1, cur_vs_hs.shape[-1]))
              elif self.training:
                  cur_vllm_emb += cur_vs_hs[0].mean() * 0
-
-         return vllm_embedding, vision_hidden_states

+         return vllm_embedding, vision_hidden_states
+
      def forward(self, data, **kwargs):
          vllm_embedding, vision_hidden_states = self.get_vllm_embedding(data)
          position_ids = data["position_ids"]
@@ -173,47 +184,18 @@ class MiniCPMV(MiniCPMVPreTrainedModel):
              **kwargs
          )

-     def _convert_to_tensors(
-         self, tokenizer, input_ids, max_inp_length: Optional[int] = None
-     ):
-         if max_inp_length is not None:
-             input_ids = input_ids[:max_inp_length]
-         input_ids = torch.tensor(input_ids, dtype=torch.int32)
-
-         image_start_tokens = torch.where(input_ids == tokenizer.im_start_id)[0]
-         # skip im_start
-         image_start_tokens += 1
-         image_end_tokens = torch.where(input_ids == tokenizer.im_end_id)[0]
-         valid_image_nums = max(len(image_start_tokens), len(image_end_tokens))
-         image_bound = torch.hstack(
-             [
-                 image_start_tokens[:valid_image_nums].unsqueeze(-1),
-                 image_end_tokens[:valid_image_nums].unsqueeze(-1),
-             ]
-         )
-
-         model_input = {}
-         model_input["input_ids"] = input_ids.unsqueeze(0).to(self.device)
-         model_input["image_bound"] = image_bound
-
-         return model_input
-
-     def _process_list(
-         self, tokenizer, input_id_list, max_inp_length: Optional[int] = None
-     ):
-         pad_keys = ["input_ids"]
-         input_tensors = []
-         for input_ids in input_id_list:
-             input_tensors.append(
-                 self._convert_to_tensors(tokenizer, input_ids, max_inp_length)
-             )
-         padded = {}
-         for key in pad_keys:
-             padded[key] = pad(input_tensors, key, padding_side="left").to(self.device)
-         padded["image_bound"] = [i["image_bound"] for i in input_tensors]
-         return padded
+     def _decode_text(self, result_ids, tokenizer):
+         result_text = []
+         for result in result_ids:
+             result = result[result != 0]
+             if result[0] == tokenizer.bos_id:
+                 result = result[1:]
+             if result[-1] == tokenizer.eos_id or result[-1] == tokenizer.eot_id:
+                 result = result[:-1]
+             result_text.append(tokenizer.decode(result).strip())
+         return result_text

-     def _decode(self, inputs_embeds, tokenizer, **kwargs):
+     def _decode(self, inputs_embeds, tokenizer, decode_text=False, **kwargs):
          terminators = [
              tokenizer.eos_token_id,
              tokenizer.convert_tokens_to_ids("<|eot_id|>")
@@ -224,7 +206,9 @@ class MiniCPMV(MiniCPMVPreTrainedModel):
              eos_token_id=terminators,
              **kwargs
          )
-         return self._decode_text(output, tokenizer)
+         if decode_text:
+             return self._decode_text(output, tokenizer)
+         return output

      def _decode_stream(self, inputs_embeds, tokenizer, **kwargs):
          terminators = [
@@ -245,93 +229,20 @@ class MiniCPMV(MiniCPMVPreTrainedModel):

          return streamer

-     def _decode_text(self, result_ids, tokenizer):
-         result_text = []
-         for result in result_ids:
-             result = result[result != 0]
-             if result[0] == tokenizer.bos_id:
-                 result = result[1:]
-             if result[-1] == tokenizer.eos_id or result[-1] == tokenizer.eot_id:
-                 result = result[:-1]
-             result_text.append(tokenizer.decode(result).strip())
-         return result_text
-
-     def slice_image(self, image):
-         return slice_image(
-             image,
-             self.config.slice_config.max_slice_nums,
-             self.config.slice_config.scale_resolution,
-             self.config.slice_config.patch_size,
-         )
-
-     def get_slice_image_placeholder(self, image, tokenizer):
-         image_placeholder = (
-             tokenizer.im_start
-             + tokenizer.unk_token * self.config.query_num
-             + tokenizer.im_end
-         )
-
-         slice_images = []
-
-         source_image, patches, best_grid = slice_image(
-             image,
-             self.config.slice_config.max_slice_nums,
-             self.config.slice_config.scale_resolution,
-             self.config.slice_config.patch_size,
-         )
-
-         slice_images.append(source_image)
-         final_placeholder = image_placeholder
-
-         if len(patches) > 0:
-             for i in range(len(patches)):
-                 for j in range(len(patches[0])):
-                     slice_images.append(patches[i][j])
-
-             final_placeholder += get_grid_placeholder(
-                 tokenizer, best_grid, self.config.query_num
-             )
-
-         return slice_images, final_placeholder
-
-     def reshape_by_patch(self, image_tensor):
-         """
-         :param image_tensor: shape [3, H, W]
-         :param patch_size:
-         :return: [3, patch_size, HW/patch_size]
-         """
-         patch_size = self.config.patch_size
-         patches = torch.nn.functional.unfold(
-             image_tensor,
-             (patch_size, patch_size),
-             stride=(patch_size, patch_size)
-         )
-
-         patches = patches.reshape(image_tensor.size(0), patch_size, patch_size, -1)
-         patches = patches.permute(0, 1, 3, 2).reshape(image_tensor.size(0), patch_size, -1)
-         return patches
-
      def generate(
          self,
-         input_id_list=None,
-         img_list=None,
-         tgt_sizes=None,
+         model_inputs,
          tokenizer=None,
-         max_inp_length: Optional[int] = None,
          vision_hidden_states=None,
-         return_vision_hidden_states=False,
          stream=False,
          **kwargs
      ):
-
-         assert input_id_list is not None
-         bs = len(input_id_list)
-         if img_list == None:
+         bs = len(model_inputs["input_ids"])
+         img_list = model_inputs["pixel_values"]
+         tgt_sizes = model_inputs["tgt_sizes"]
+         if img_list is None:
              img_list = [[] for i in range(bs)]
          assert bs == len(img_list)
-
-         model_inputs = self._process_list(tokenizer, input_id_list, max_inp_length)
-
          if vision_hidden_states is None:
              pixel_values = []
              for i in range(bs):
@@ -347,19 +258,17 @@ class MiniCPMV(MiniCPMVPreTrainedModel):
          else:
              model_inputs["vision_hidden_states"] = vision_hidden_states

-         with torch.inference_mode():
-             (
-                 model_inputs["inputs_embeds"],
-                 vision_hidden_states,
-             ) = self.get_vllm_embedding(model_inputs)
-
-             if stream:
-                 result = self._decode_stream(model_inputs["inputs_embeds"], tokenizer, **kwargs)
-             else:
-                 result = self._decode(model_inputs["inputs_embeds"], tokenizer, **kwargs)
+         (
+             input_embeds,
+             vision_hidden_states,
+         ) = self.get_vllm_embedding(model_inputs)

-         if return_vision_hidden_states:
-             return result, vision_hidden_states
+         # output_ids = self._decode(input_embeds, tokenizer, **kwargs)
+         if stream:
+             kwargs.pop("decode_text")
+             result = self._decode_stream(input_embeds, tokenizer, **kwargs)
+         else:
+             result = self._decode(input_embeds, tokenizer, **kwargs)

          return result

@@ -368,6 +277,7 @@ class MiniCPMV(MiniCPMVPreTrainedModel):
          image,
          msgs,
          tokenizer,
+         processor=None,
          vision_hidden_states=None,
          max_new_tokens=1024,
          sampling=True,
@@ -376,18 +286,20 @@ class MiniCPMV(MiniCPMVPreTrainedModel):
          stream=False,
          **kwargs
      ):
+         if processor is None:
+             processor = AutoProcessor.from_pretrained(self.config._name_or_path, trust_remote_code=True)
          if isinstance(msgs, str):
              msgs = json.loads(msgs)
-
          copy_msgs = deepcopy(msgs)
-         assert len(copy_msgs) > 0, 'msgs is empty'
-         assert sampling or not stream, 'if use stream mode, make sure sampling=True'

-         if image is not None and isinstance(copy_msgs[0]['content'], str):
-             copy_msgs[0]['content'] = [image, copy_msgs[0]['content']]
+         assert len(msgs) > 0, "msgs is empty"
+         assert sampling or not stream, "if use stream mode, make sure sampling=True"
+
+         if image is not None and isinstance(copy_msgs[0]["content"], str):
+             # copy_msgs[0]['content'] = '(<image>./</image>)\n' + copy_msgs[0]['content']
+             copy_msgs[0]["content"] = [image, copy_msgs[0]["content"]]

          images = []
-         tgt_sizes = []
          for i, msg in enumerate(copy_msgs):
              role = msg["role"]
              content = msg["content"]
@@ -396,41 +308,21 @@ class MiniCPMV(MiniCPMVPreTrainedModel):
                  assert role == "user", "The role of first msg should be user"
              if isinstance(content, str):
                  content = [content]
-
              cur_msgs = []
              for c in content:
                  if isinstance(c, Image.Image):
-                     image = c
-                     if self.config.slice_mode:
-                         slice_images, image_placeholder = self.get_slice_image_placeholder(
-                             image, tokenizer
-                         )
-                         cur_msgs.append(image_placeholder)
-                         for slice_image in slice_images:
-                             slice_image = self.transform(slice_image)
-                             H, W = slice_image.shape[1:]
-                             images.append(self.reshape_by_patch(slice_image))
-                             tgt_sizes.append(torch.Tensor([H // self.config.patch_size, W // self.config.patch_size]).type(torch.int32))
-                     else:
-                         images.append(self.transform(image))
-                         cur_msgs.append(
-                             tokenizer.im_start
-                             + tokenizer.unk_token * self.config.query_num
-                             + tokenizer.im_end
-                         )
+                     images.append(c)
+                     cur_msgs.append("(<image>./</image>)")
                  elif isinstance(c, str):
                      cur_msgs.append(c)
-
-
-             msg['content'] = '\n'.join(cur_msgs)
-         if tgt_sizes:
-             tgt_sizes = torch.vstack(tgt_sizes)
+             msg["content"] = "\n".join(cur_msgs)

          if system_prompt:
              sys_msg = {'role': 'system', 'content': system_prompt}
-             copy_msgs = [sys_msg] + copy_msgs
+             copy_msgs = [sys_msg] + copy_msgs

-         input_ids = tokenizer.apply_chat_template(copy_msgs, tokenize=True, add_generation_prompt=False)
+         prompt = processor.tokenizer.apply_chat_template(copy_msgs, tokenize=False, add_generation_prompt=True)
+         inputs = processor(prompt, images, return_tensors="pt", max_length=max_inp_length).to(self.device)

          if sampling:
              generation_config = {
@@ -449,21 +341,17 @@ class MiniCPMV(MiniCPMVPreTrainedModel):
              generation_config.update(
                  (k, kwargs[k]) for k in generation_config.keys() & kwargs.keys()
              )
-
          with torch.inference_mode():
-             res, vision_hidden_states = self.generate(
-                 input_id_list=[input_ids],
-                 max_inp_length=max_inp_length,
-                 img_list=[images],
-                 tgt_sizes=[tgt_sizes],
+             res = self.generate(
+                 inputs,
                  tokenizer=tokenizer,
                  max_new_tokens=max_new_tokens,
                  vision_hidden_states=vision_hidden_states,
-                 return_vision_hidden_states=True,
                  stream=stream,
+                 decode_text=True,
                  **generation_config
              )
-
+
          if stream:
              def stream_gen():
                  for text in res:
@@ -474,229 +362,3 @@ class MiniCPMV(MiniCPMVPreTrainedModel):
          else:
              answer = res[0]
              return answer
-
-
- class PreTrainedTokenizerFastWrapper(PreTrainedTokenizerFast):
-     def __init__(self, **kwargs):
-         super().__init__(**kwargs)
-         self.eot_token = "<|eot_id|>"
-         self.im_start = "<image>"
-         self.im_end = "</image>"
-         self.ref_start = "<ref>"
-         self.ref_end = "</ref>"
-         self.box_start = "<box>"
-         self.box_end = "</box>"
-         self.quad_start = "<quad>"
-         self.quad_end = "</quad>"
-         self.slice_start = "<slice>"
-         self.slice_end = "</slice>"
-
-     @property
-     def eos_id(self):
-         return self.eos_token_id
-
-     @property
-     def bos_id(self):
-         return self.bos_token_id
-
-     @property
-     def unk_id(self):
-         return self.unk_token_id
-
-     @property
-     def eot_id(self):
-         return self.convert_tokens_to_ids(self.eot_token)
-
-     @property
-     def im_start_id(self):
-         return self.convert_tokens_to_ids(self.im_start)
-
-     @property
-     def im_end_id(self):
-         return self.convert_tokens_to_ids(self.im_end)
-
-     @staticmethod
-     def escape(text: str) -> str:
-         return text
-
-     @staticmethod
-     def unescape(text: str) -> str:
-         return text
-
-
- def pad(orig_items, key, max_length=None, padding_value=0, padding_side="left"):
-     items = []
-     if isinstance(orig_items[0][key], list):
-         assert isinstance(orig_items[0][key][0], torch.Tensor)
-         for it in orig_items:
-             for tr in it[key]:
-                 items.append({key: tr})
-     else:
-         assert isinstance(orig_items[0][key], torch.Tensor)
-         items = orig_items
-
-     batch_size = len(items)
-     shape = items[0][key].shape
-     dim = len(shape)
-     assert dim <= 3
-     if max_length is None:
-         max_length = 0
-     max_length = max(max_length, max(item[key].shape[-1] for item in items))
-     min_length = min(item[key].shape[-1] for item in items)
-     dtype = items[0][key].dtype
-
-     if dim == 1:
-         return torch.cat([item[key] for item in items], dim=0)
-     elif dim == 2:
-         if max_length == min_length:
-             return torch.cat([item[key] for item in items], dim=0)
-         tensor = torch.zeros((batch_size, max_length), dtype=dtype) + padding_value
-     else:
-         tensor = (
-             torch.zeros((batch_size, max_length, shape[-1]), dtype=dtype)
-             + padding_value
-         )
-
-     for i, item in enumerate(items):
-         if dim == 2:
-             if padding_side == "left":
-                 tensor[i, -len(item[key][0]) :] = item[key][0].clone()
-             else:
-                 tensor[i, : len(item[key][0])] = item[key][0].clone()
-         elif dim == 3:
-             if padding_side == "left":
-                 tensor[i, -len(item[key][0]) :, :] = item[key][0].clone()
-             else:
-                 tensor[i, : len(item[key][0]), :] = item[key][0].clone()
-
-     return tensor
-
-
- def slice_image(
-     image, max_slice_nums=9, scale_resolution=448, patch_size=14, never_split=False
- ):
-     original_size = image.size
-     original_width, original_height = original_size
-     log_ratio = math.log(original_width / original_height)
-     ratio = original_width * original_height / (scale_resolution * scale_resolution)
-     multiple = min(math.ceil(ratio), max_slice_nums)
-
-     source_image = None
-     best_grid = None
-     patches = []
-
-     if multiple <= 1 or never_split:
-         # don't need to slice, upsample
-         best_size = find_best_resize(
-             original_size, scale_resolution, patch_size, allow_upscale=True
-         )
-         source_image = image.resize(best_size, Image.Resampling.BICUBIC)
-     else:
-         candidate_split_grids_nums = []
-         for i in [multiple - 1, multiple, multiple + 1]:
-             if i == 1 or i > max_slice_nums:
-                 continue
-             candidate_split_grids_nums.append(i)
-
-         # source image: down-sample and ensure it is divisible by patch_size
-         best_resize = find_best_resize(original_size, scale_resolution, patch_size)
-         source_image = image.copy().resize(best_resize, Image.Resampling.BICUBIC)
-         candidate_grids = []
-
-         # find best grid
-         for split_grids_nums in candidate_split_grids_nums:
-             m = 1
-             while m <= split_grids_nums:
-                 if split_grids_nums % m == 0:
-                     candidate_grids.append([m, split_grids_nums // m])
-                 m += 1
-
-         best_grid = [1, 1]
-         min_error = float("inf")
-         for grid in candidate_grids:
-             error = abs(log_ratio - math.log(grid[0] / grid[1]))
-             if error < min_error:
-                 best_grid = grid
-                 min_error = error
-
-         refine_size = get_refine_size(
-             original_size, best_grid, scale_resolution, patch_size, allow_upscale=True
-         )
-
-         refine_image = image.resize(refine_size, Image.Resampling.BICUBIC)
-         patches = split_to_patches(refine_image, best_grid)
-
-     return source_image, patches, best_grid
-
-
- def ensure_divide(length, patch_size):
-     return max(round(length / patch_size) * patch_size, patch_size)
-
-
- def find_best_resize(original_size, scale_resolution, patch_size, allow_upscale=False):
-     width, height = original_size
-     if (width * height > scale_resolution * scale_resolution) or allow_upscale:
-         r = width / height
-         height = int(scale_resolution / math.sqrt(r))
-         width = int(height * r)
-     best_width = ensure_divide(width, patch_size)
-     best_height = ensure_divide(height, patch_size)
-     return (best_width, best_height)
-
-
- def get_refine_size(
-     original_size, grid, scale_resolution, patch_size, allow_upscale=False
- ):
-     width, height = original_size
-     grid_x, grid_y = grid
-
-     refine_width = ensure_divide(width, grid_x)
-     refine_height = ensure_divide(height, grid_y)
-
-     grid_width = refine_width / grid_x
-     grid_height = refine_height / grid_y
-
-     best_grid_size = find_best_resize(
-         (grid_width, grid_height),
-         scale_resolution,
-         patch_size,
-         allow_upscale=allow_upscale,
-     )
-
-     refine_size = (best_grid_size[0] * grid_x, best_grid_size[1] * grid_y)
-
-     return refine_size
-
-
- def split_to_patches(image, grid):
-     patches = []
-     width, height = image.size
-     grid_x = int(width / grid[0])
-     grid_y = int(height / grid[1])
-
-     for i in range(0, height, grid_y):
-         images = []
-         for j in range(0, width, grid_x):
-             box = (j, i, j + grid_x, i + grid_y)
-             patch = image.crop(box)
-             images.append(patch)
-         patches.append(images)
-
-     return patches
-
-
- def get_grid_placeholder(tokenizer, grid, query_num):
-     image_placeholder = (
-         tokenizer.im_start + tokenizer.unk_token * query_num + tokenizer.im_end
-     )
-
-     cols = grid[0]
-     rows = grid[1]
-     slices = []
-     for i in range(rows):
-         lines = []
-         for j in range(cols):
-             lines.append(image_placeholder)
-         slices.append("".join(lines))
-     slice_placeholder = tokenizer.slice_start + "\n".join(slices) + tokenizer.slice_end
-     return slice_placeholder
preprocessor_config.json ADDED
@@ -0,0 +1,20 @@
+ {
+   "image_processor_type": "MiniCPMVImageProcessor",
+   "auto_map": {
+     "AutoProcessor": "processing_minicpmv.MiniCPMVProcessor",
+     "AutoImageProcessor": "image_processing_minicpmv.MiniCPMVImageProcessor"
+   },
+   "processor_class": "MiniCPMVProcessor",
+   "max_slice_nums": 9,
+   "scale_resolution": 448,
+   "patch_size": 14,
+   "image_feature_size": 96,
+   "im_start": "<image>",
+   "im_end": "</image>",
+   "slice_start": "<slice>",
+   "slice_end": "</slice>",
+   "unk": "<unk>",
+   "norm_mean": [0.5, 0.5, 0.5],
+   "norm_std": [0.5, 0.5, 0.5],
+   "version": 2.5
+ }
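
With the auto_map above, the image processor resolves like this (an editor's note; repo id assumed as in the earlier examples):

    from transformers import AutoImageProcessor

    image_processor = AutoImageProcessor.from_pretrained(
        "openbmb/MiniCPM-Llama3-V-2_5", trust_remote_code=True
    )
    print(type(image_processor).__name__)  # MiniCPMVImageProcessor
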
processing_minicpmv.py ADDED
@@ -0,0 +1,247 @@
+ # coding=utf-8
+ # Copyright 2024 The HuggingFace Inc. team.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """
+ Processor class for MiniCPMV.
+ """
+
+ from typing import List, Optional, Union, Dict, Any
+ import torch
+ import re
+
+ from transformers.image_processing_utils import BatchFeature
+ from transformers.image_utils import ImageInput
+ from transformers.processing_utils import ProcessorMixin
+ from transformers.tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
+ from transformers.utils import TensorType, requires_backends, is_torch_dtype, is_torch_device
+
+ from .image_processing_minicpmv import MiniCPMVBatchFeature
+
+
+ class MiniCPMVProcessor(ProcessorMixin):
+     r"""
+     Constructs a MiniCPMV processor which wraps a MiniCPMV image processor and a MiniCPMV tokenizer into a single processor.
+
+     [`MiniCPMVProcessor`] offers all the functionalities of [`MiniCPMVImageProcessor`] and [`LlamaTokenizerWrapper`]. See the
+     [`~MiniCPMVProcessor.__call__`] and [`~MiniCPMVProcessor.decode`] for more information.
+
+     Args:
+         image_processor ([`MiniCPMVImageProcessor`], *optional*):
+             The image processor is a required input.
+         tokenizer ([`LlamaTokenizerWrapper`], *optional*):
+             The tokenizer is a required input.
+     """
+     attributes = ["image_processor", "tokenizer"]
+     image_processor_class = "AutoImageProcessor"
+     tokenizer_class = "AutoTokenizer"
+
+     def __init__(self, image_processor=None, tokenizer=None):
+         super().__init__(image_processor, tokenizer)
+         self.version = image_processor.version
+
+     def __call__(
+         self,
+         text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
+         images: ImageInput = None,
+         padding: Union[bool, str, PaddingStrategy] = False,
+         truncation: Union[bool, str, TruncationStrategy] = None,
+         max_length: Optional[int] = None,
+         do_pad: Optional[bool] = True,
+         return_tensors: Optional[Union[str, TensorType]] = TensorType.PYTORCH,
+     ) -> MiniCPMVBatchFeature:
+         """
+         Main method to prepare one or several sequence(s) and image(s) for the model. This method forwards the `text`
+         and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`] if `text` is not `None` to encode
+         the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
+         MiniCPMVImageProcessor's [`~MiniCPMVImageProcessor.__call__`] if `images` is not `None`. Please refer to the
+         docstring of the above two methods for more information.
+
+         Args:
+             text (`str`, `List[str]`, `List[List[str]]`):
+                 The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
+                 (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
+                 `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
+             images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
+                 The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
+                 tensor. Both channels-first and channels-last formats are supported.
+             padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
+                 Select a strategy to pad the returned sequences (according to the model's padding side and padding
+                 index) among:
+                 - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
+                   sequence is provided).
+                 - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
+                   acceptable input length for the model if that argument is not provided.
+                 - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
+                   lengths).
+             max_length (`int`, *optional*):
+                 Maximum length of the returned list and optionally padding length (see above).
+             do_pad (`bool`, *optional*, defaults to self.do_pad):
+                 Whether to pad the image. If `True` will pad the images in the batch to the largest image in the batch
+                 and create a pixel mask. Padding will be applied to the bottom and right of the image with zeros.
+             truncation (`bool`, *optional*):
+                 Activates truncation to cut input sequences longer than `max_length` to `max_length`.
+             return_tensors (`str` or [`~utils.TensorType`], *optional*):
+                 If set, will return tensors of a particular framework. Acceptable values are:
+
+                 - `'tf'`: Return TensorFlow `tf.constant` objects.
+                 - `'pt'`: Return PyTorch `torch.Tensor` objects.
+                 - `'np'`: Return NumPy `np.ndarray` objects.
+                 - `'jax'`: Return JAX `jnp.ndarray` objects.
+
+         Returns:
+             [`BatchFeature`]: A [`BatchFeature`] with the following fields:
+
+             - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
+             - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
+               `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
+               `None`).
+             - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
+         """
+         if images is not None:
+             image_inputs = self.image_processor(images, do_pad=do_pad, return_tensors=return_tensors)
+         return self._convert_images_texts_to_inputs(image_inputs, text, max_length=max_length)
+
+     # Copied from transformers.models.clip.processing_clip.CLIPProcessor.batch_decode with CLIP->Llama
+     def batch_decode(self, *args, **kwargs):
+         """
+         This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
+         refer to the docstring of this method for more information.
+         """
+         output_ids = args[0]
+         result_text = []
+         for result in output_ids:
+             result = result[result != 0]
+             if result[0] == self.tokenizer.bos_id:
+                 result = result[1:]
+             if result[-1] == self.tokenizer.eos_id:
+                 result = result[:-1]
+             result_text.append(self.tokenizer.decode(result, *args[1:], **kwargs).strip())
+         return result_text
+         # return self.tokenizer.batch_decode(*args, **kwargs)
+
+     # Copied from transformers.models.clip.processing_clip.CLIPProcessor.decode with CLIP->Llama
+     def decode(self, *args, **kwargs):
+         """
+         This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
+         the docstring of this method for more information.
+         """
+         result = args[0]
+         result = result[result != 0]
+         if result[0] == self.tokenizer.bos_id:
+             result = result[1:]
+         if result[-1] == self.tokenizer.eos_id or (hasattr(self.tokenizer, "eot_id") and result[-1] == self.tokenizer.eot_id):
+             result = result[:-1]
+         return self.tokenizer.decode(result, *args[1:], **kwargs).strip()
+
+     def _convert(
+         self, input_str, max_inp_length: Optional[int] = None
+     ):
+         if self.version == 2.5 or self.tokenizer.add_bos_token:
+             input_ids = self.tokenizer.encode(input_str)
+         else:
+             input_ids = [self.tokenizer.bos_id] + self.tokenizer.encode(input_str)
+         if max_inp_length is not None:
+             input_ids = input_ids[:max_inp_length]
+         input_ids = torch.tensor(input_ids, dtype=torch.int32)
+
+         image_start_tokens = torch.where(input_ids == self.tokenizer.im_start_id)[0]
+         image_start_tokens += 1
+         image_end_tokens = torch.where(input_ids == self.tokenizer.im_end_id)[0]
+         valid_image_nums = max(len(image_start_tokens), len(image_end_tokens))
+         image_bounds = torch.hstack(
+             [
+                 image_start_tokens[:valid_image_nums].unsqueeze(-1),
+                 image_end_tokens[:valid_image_nums].unsqueeze(-1),
+             ]
+         )
+         return input_ids.unsqueeze(0), image_bounds
+
+     def _convert_images_texts_to_inputs(self, images, texts, do_pad=False, truncation=None, max_length=None, return_tensors=None):
+         if not len(images):
+             model_inputs = self.tokenizer(texts, return_tensors=return_tensors, padding=do_pad, truncation=truncation, max_length=max_length)
+             return MiniCPMVBatchFeature(data={**model_inputs})
+
+         pattern = "(<image>./</image>)"
+         images, image_sizes, tgt_sizes = images["pixel_values"], images["image_sizes"], images["tgt_sizes"]
+
+         image_tags = re.findall(pattern, texts)
+         assert len(image_tags) == len(image_sizes)
+         text_chunks = texts.split(pattern)
+         final_texts = ""
+         for i in range(len(image_tags)):
+             final_texts = final_texts + text_chunks[i] + self.image_processor.get_slice_image_placeholder(image_sizes[i])
+         final_texts += text_chunks[-1]
+         input_ids, image_bounds = self._convert(final_texts, max_length)
+         return MiniCPMVBatchFeature(data={
+             "input_ids": input_ids,
+             "pixel_values": [images],
+             "image_sizes": [image_sizes],
+             "image_bound": [image_bounds],
+             "tgt_sizes": [tgt_sizes]
+         })
+
+     @property
+     # Copied from transformers.models.clip.processing_clip.CLIPProcessor.model_input_names
+     def model_input_names(self):
+         tokenizer_input_names = self.tokenizer.model_input_names
+         image_processor_input_names = self.image_processor.model_input_names
+         return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
+
+
+     def pad(self, orig_items, key, max_length=None, padding_value=0, padding_side="left"):
+         items = []
+         if isinstance(orig_items[0][key], list):
+             assert isinstance(orig_items[0][key][0], torch.Tensor)
+             for it in orig_items:
+                 for tr in it[key]:
+                     items.append({key: tr})
+         else:
+             assert isinstance(orig_items[0][key], torch.Tensor)
+             items = orig_items
+
+         batch_size = len(items)
+         shape = items[0][key].shape
+         dim = len(shape)
+         assert dim <= 3
+         if max_length is None:
+             max_length = 0
+         max_length = max(max_length, max(item[key].shape[-1] for item in items))
+         min_length = min(item[key].shape[-1] for item in items)
+         dtype = items[0][key].dtype
+
+         if dim == 1:
+             return torch.cat([item[key] for item in items], dim=0)
+         elif dim == 2:
+             if max_length == min_length:
+                 return torch.cat([item[key] for item in items], dim=0)
+             tensor = torch.zeros((batch_size, max_length), dtype=dtype) + padding_value
+         else:
+             tensor = (
+                 torch.zeros((batch_size, max_length, shape[-1]), dtype=dtype)
+                 + padding_value
+             )
+
+         for i, item in enumerate(items):
+             if dim == 2:
+                 if padding_side == "left":
+                     tensor[i, -len(item[key][0]) :] = item[key][0].clone()
+                 else:
+                     tensor[i, : len(item[key][0])] = item[key][0].clone()
+             elif dim == 3:
+                 if padding_side == "left":
+                     tensor[i, -len(item[key][0]) :, :] = item[key][0].clone()
+                 else:
+                     tensor[i, : len(item[key][0]), :] = item[key][0].clone()
+
+         return tensor
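
What a single processor call produces (an editor's sketch; processor and image are assumed loaded as in the earlier examples):

    prompt = "(<image>./</image>)\nDescribe the picture."
    batch = processor(prompt, [image], return_tensors="pt")
    print(batch["input_ids"].shape)  # (1, seq_len): each image tag expanded to <image><unk>...</image> plus <slice>...</slice>
    print(batch["image_bound"][0])   # (start, end) spans of the <unk> runs that receive vision features
    print(batch["tgt_sizes"][0])     # patch-grid (H, W) per slice, as produced by the image processor
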
tokenization_minicpmv_fast.py ADDED
@@ -0,0 +1,51 @@
+ import json
+
+ from transformers.tokenization_utils_fast import PreTrainedTokenizerFast
+
+
+ class MiniCPMVTokenizerFast(PreTrainedTokenizerFast):
+     def __init__(self, **kwargs):
+         super().__init__(**kwargs)
+         self.eot_token = "<|eot_id|>"
+         self.im_start = "<image>"
+         self.im_end = "</image>"
+         self.ref_start = "<ref>"
+         self.ref_end = "</ref>"
+         self.box_start = "<box>"
+         self.box_end = "</box>"
+         self.quad_start = "<quad>"
+         self.quad_end = "</quad>"
+         self.slice_start = "<slice>"
+         self.slice_end = "</slice>"
+
+     @property
+     def eos_id(self):
+         return self.eos_token_id
+
+     @property
+     def bos_id(self):
+         return self.bos_token_id
+
+     @property
+     def unk_id(self):
+         return self.unk_token_id
+
+     @property
+     def eot_id(self):
+         return self.convert_tokens_to_ids(self.eot_token)
+
+     @property
+     def im_start_id(self):
+         return self.convert_tokens_to_ids(self.im_start)
+
+     @property
+     def im_end_id(self):
+         return self.convert_tokens_to_ids(self.im_end)
+
+     @staticmethod
+     def escape(text: str) -> str:
+         return text
+
+     @staticmethod
+     def unescape(text: str) -> str:
+         return text
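
The processor's _convert() step depends on the id helpers defined above (an editor's note; repo id assumed as in the earlier examples):

    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("openbmb/MiniCPM-Llama3-V-2_5", trust_remote_code=True)
    print(tokenizer.im_start_id, tokenizer.im_end_id)  # ids of <image> and </image>, used to locate image_bound
    print(tokenizer.eot_id)                            # id of <|eot_id|>, used as an extra generation terminator
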
tokenizer_config.json CHANGED
@@ -2051,7 +2051,7 @@
    },
    "auto_map": {
      "AutoTokenizer": [
-       "modeling_minicpmv.PreTrainedTokenizerFastWrapper",
+       "tokenization_minicpmv_fast.MiniCPMVTokenizerFast",
        null
      ]
    },
@@ -2066,7 +2066,7 @@
    "model_max_length": 1000000000000000019884624838656,
    "pad_token": "!",
    "padding_side": "right",
-   "tokenizer_class": "PreTrainedTokenizerFastWrapper",
+   "tokenizer_class": "MiniCPMVTokenizerFast",
    "truncation_side": "right",
    "unk_token": "<unk>"
  }