Safetensors
custom_code
Commit 5e5f119 by kyusonglee (parent: d542e6b)

Upload 5 files

model-00002-of-00006.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:018699a8dac60053fc0d0916584af81a7f50f672914020e295e45e2f15ad5856
+ size 4937253320
modeling_omchat.py ADDED
@@ -0,0 +1,1354 @@
1
+ import math
2
+ from dataclasses import dataclass
3
+ from typing import List, Optional, Tuple, Union
4
+
5
+ import numpy as np
6
+ import torch
7
+ import torch.utils.checkpoint
8
+ from torch import nn
9
+
10
+ from transformers.configuration_utils import PretrainedConfig
11
+ from .configuration_omchat import OmChatConfig
12
+
13
+ from transformers import Qwen2Config, Qwen2Model, Qwen2ForCausalLM, AutoConfig, AutoModelForCausalLM
14
+ from transformers.utils import logging
15
+ from transformers.modeling_outputs import ModelOutput
16
+ from transformers.utils import (
17
+ add_start_docstrings,
18
+ add_start_docstrings_to_model_forward,
19
+ logging,
20
+ replace_return_docstrings,
21
+ )
22
+
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+ _CONFIG_FOR_DOC = "OmChatConfig"
28
+
29
+ from typing import Optional, Tuple, Union
30
+
31
+ import torch
32
+ import torch.nn.functional as F
33
+ import torch.utils.checkpoint
34
+ from einops import rearrange
35
+ from timm.models.layers import DropPath
36
+ from torch import nn
37
+ from transformers.activations import ACT2FN
38
+ from transformers.modeling_outputs import (BaseModelOutput,
39
+ BaseModelOutputWithPooling)
40
+ from transformers.modeling_utils import PreTrainedModel
41
+ from transformers.utils import logging
42
+
43
+ from .configuration_omchat import InternVisionConfig
44
+
45
+ try:
46
+ from .flash_attention import FlashAttention
47
+ has_flash_attn = True
48
+ except Exception:
49
+ print('FlashAttention is not installed.')
50
+ has_flash_attn = False
51
+
52
+
53
+ logger = logging.get_logger(__name__)
54
+
55
+
56
+ class InternRMSNorm(nn.Module):
57
+ def __init__(self, hidden_size, eps=1e-6):
58
+ super().__init__()
59
+ self.weight = nn.Parameter(torch.ones(hidden_size))
60
+ self.variance_epsilon = eps
61
+
62
+ def forward(self, hidden_states):
63
+ input_dtype = hidden_states.dtype
64
+ hidden_states = hidden_states.to(torch.float32)
65
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
66
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
67
+ return self.weight * hidden_states.to(input_dtype)
68
+
69
+
70
+ try:
71
+ from apex.normalization import FusedRMSNorm
72
+
73
+ InternRMSNorm = FusedRMSNorm # noqa
74
+
75
+ logger.info('Discovered apex.normalization.FusedRMSNorm - will use it instead of InternRMSNorm')
76
+ except ImportError:
77
+ # using the normal InternRMSNorm
78
+ pass
79
+ except Exception:
80
+ logger.warning('discovered apex but it failed to load, falling back to InternRMSNorm')
81
+ pass
82
+
83
+
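As a quick illustration of what the RMSNorm above computes (this sketch is not part of the uploaded file; the tensor values are made up), each hidden vector is scaled by the reciprocal root-mean-square of its elements and then by the learned per-channel weight, which is initialized to ones:

```python
import torch

# Illustrative only, assuming the pure-PyTorch InternRMSNorm defined above
# (i.e. apex's FusedRMSNorm was not substituted in).
norm = InternRMSNorm(4, eps=1e-6)
x = torch.tensor([[1.0, 2.0, 3.0, 4.0]])
# mean of squares = 7.5, so every element is multiplied by rsqrt(7.5) ~= 0.3651
print(norm(x))  # tensor([[0.3651, 0.7303, 1.0954, 1.4606]], grad_fn=...)
```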
84
+ class InternVisionEmbeddings(nn.Module):
85
+ def __init__(self, config: InternVisionConfig):
86
+ super().__init__()
87
+ self.config = config
88
+ self.embed_dim = config.hidden_size
89
+ self.image_size = config.image_size
90
+ self.patch_size = config.patch_size
91
+
92
+ self.class_embedding = nn.Parameter(
93
+ torch.randn(1, 1, self.embed_dim),
94
+ )
95
+
96
+ self.patch_embedding = nn.Conv2d(
97
+ in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size
98
+ )
99
+
100
+ self.num_patches = (self.image_size // self.patch_size) ** 2
101
+ self.num_positions = self.num_patches + 1
102
+
103
+ self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim))
104
+
105
+ def _get_pos_embed(self, pos_embed, H, W):
106
+ target_dtype = pos_embed.dtype
107
+ pos_embed = pos_embed.float().reshape(
108
+ 1, self.image_size // self.patch_size, self.image_size // self.patch_size, -1).permute(0, 3, 1, 2)
109
+ pos_embed = F.interpolate(pos_embed, size=(H, W), mode='bicubic', align_corners=False).\
110
+ reshape(1, -1, H * W).permute(0, 2, 1).to(target_dtype)
111
+ return pos_embed
112
+
113
+ def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
114
+ target_dtype = self.patch_embedding.weight.dtype
115
+ patch_embeds = self.patch_embedding(pixel_values) # shape = [*, channel, width, height]
116
+ batch_size, _, height, width = patch_embeds.shape
117
+ patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
118
+ class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype)
119
+ embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
120
+ position_embedding = torch.cat([
121
+ self.position_embedding[:, :1, :],
122
+ self._get_pos_embed(self.position_embedding[:, 1:, :], height, width)
123
+ ], dim=1)
124
+ embeddings = embeddings + position_embedding.to(target_dtype)
125
+ return embeddings
126
+
127
+
128
+ class InternAttention(nn.Module):
129
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
130
+
131
+ def __init__(self, config: InternVisionConfig):
132
+ super().__init__()
133
+ self.config = config
134
+ self.embed_dim = config.hidden_size
135
+ self.num_heads = config.num_attention_heads
136
+ self.use_flash_attn = config.use_flash_attn and has_flash_attn
137
+ if config.use_flash_attn and not has_flash_attn:
138
+ print('Warning: Flash Attention is not available, use_flash_attn is set to False.')
139
+ self.head_dim = self.embed_dim // self.num_heads
140
+ if self.head_dim * self.num_heads != self.embed_dim:
141
+ raise ValueError(
142
+ f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:'
143
+ f' {self.num_heads}).'
144
+ )
145
+
146
+ self.scale = self.head_dim ** -0.5
147
+ self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim, bias=config.qkv_bias)
148
+ self.attn_drop = nn.Dropout(config.attention_dropout)
149
+ self.proj_drop = nn.Dropout(config.dropout)
150
+
151
+ self.qk_normalization = config.qk_normalization
152
+
153
+ if self.qk_normalization:
154
+ self.q_norm = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps)
155
+ self.k_norm = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps)
156
+
157
+ if self.use_flash_attn:
158
+ self.inner_attn = FlashAttention(attention_dropout=config.attention_dropout)
159
+ self.proj = nn.Linear(self.embed_dim, self.embed_dim)
160
+
161
+ def _naive_attn(self, x):
162
+ B, N, C = x.shape
163
+ qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
164
+ q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple)
165
+
166
+ if self.qk_normalization:
167
+ B_, H_, N_, D_ = q.shape
168
+ q = self.q_norm(q.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2)
169
+ k = self.k_norm(k.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2)
170
+
171
+ attn = ((q * self.scale) @ k.transpose(-2, -1))
172
+ attn = attn.softmax(dim=-1)
173
+ attn = self.attn_drop(attn)
174
+
175
+ x = (attn @ v).transpose(1, 2).reshape(B, N, C)
176
+ x = self.proj(x)
177
+ x = self.proj_drop(x)
178
+ return x
179
+
180
+ def _flash_attn(self, x, key_padding_mask=None, need_weights=False):
181
+ qkv = self.qkv(x)
182
+ qkv = rearrange(qkv, 'b s (three h d) -> b s three h d', three=3, h=self.num_heads)
183
+
184
+ if self.qk_normalization:
185
+ q, k, v = qkv.unbind(2)
186
+ q = self.q_norm(q.flatten(-2, -1)).view(q.shape)
187
+ k = self.k_norm(k.flatten(-2, -1)).view(k.shape)
188
+ qkv = torch.stack([q, k, v], dim=2)
189
+
190
+ context, _ = self.inner_attn(
191
+ qkv, key_padding_mask=key_padding_mask, need_weights=need_weights, causal=False
192
+ )
193
+ outs = self.proj(rearrange(context, 'b s h d -> b s (h d)'))
194
+ outs = self.proj_drop(outs)
195
+ return outs
196
+
197
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
198
+ x = self._naive_attn(hidden_states) if not self.use_flash_attn else self._flash_attn(hidden_states)
199
+ return x
200
+
201
+
202
+ class InternMLP(nn.Module):
203
+ def __init__(self, config: InternVisionConfig):
204
+ super().__init__()
205
+ self.config = config
206
+ self.act = ACT2FN[config.hidden_act]
207
+ self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
208
+ self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
209
+
210
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
211
+ hidden_states = self.fc1(hidden_states)
212
+ hidden_states = self.act(hidden_states)
213
+ hidden_states = self.fc2(hidden_states)
214
+ return hidden_states
215
+
216
+
217
+ class InternVisionEncoderLayer(nn.Module):
218
+ def __init__(self, config: InternVisionConfig, drop_path_rate: float):
219
+ super().__init__()
220
+ self.embed_dim = config.hidden_size
221
+ self.intermediate_size = config.intermediate_size
222
+
223
+ self.attn = InternAttention(config)
224
+ self.mlp = InternMLP(config)
225
+ self.norm1 = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps)
226
+ self.norm2 = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps)
227
+
228
+ self.ls1 = nn.Parameter(config.initializer_factor * torch.ones(self.embed_dim))
229
+ self.ls2 = nn.Parameter(config.initializer_factor * torch.ones(self.embed_dim))
230
+ self.drop_path1 = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
231
+ self.drop_path2 = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
232
+
233
+ def forward(
234
+ self,
235
+ hidden_states: torch.Tensor,
236
+ ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor], Optional[Tuple[torch.FloatTensor]]]:
237
+ """
238
+ Args:
239
+ hidden_states (`torch.FloatTensor`): input to the layer, of shape `(batch, seq_len, embed_dim)`
240
+ """
241
+ hidden_states = hidden_states + self.drop_path1(self.attn(self.norm1(hidden_states)) * self.ls1)
242
+
243
+ hidden_states = hidden_states + self.drop_path2(self.mlp(self.norm2(hidden_states)) * self.ls2)
244
+
245
+ return hidden_states
246
+
247
+
248
+ class InternVisionEncoder(nn.Module):
249
+ """
250
+ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
251
+ [`InternEncoderLayer`].
252
+
253
+ Args:
254
+ config (`InternConfig`):
255
+ The corresponding vision configuration for the `InternEncoder`.
256
+ """
257
+
258
+ def __init__(self, config: InternVisionConfig):
259
+ super().__init__()
260
+ self.config = config
261
+ # stochastic depth decay rule
262
+ dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, config.num_hidden_layers)]
263
+ self.layers = nn.ModuleList([
264
+ InternVisionEncoderLayer(config, dpr[idx]) for idx in range(config.num_hidden_layers)])
265
+ self.gradient_checkpointing = True
266
+
267
+ def forward(
268
+ self,
269
+ inputs_embeds,
270
+ output_hidden_states: Optional[bool] = None,
271
+ return_dict: Optional[bool] = None,
272
+ ) -> Union[Tuple, BaseModelOutput]:
273
+ r"""
274
+ Args:
275
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
276
+ Embedded representation of the inputs. Should be float, not int tokens.
277
+ output_hidden_states (`bool`, *optional*):
278
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
279
+ for more detail.
280
+ return_dict (`bool`, *optional*):
281
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
282
+ """
283
+ output_hidden_states = (
284
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
285
+ )
286
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
287
+
288
+ encoder_states = () if output_hidden_states else None
289
+ hidden_states = inputs_embeds
290
+
291
+ for idx, encoder_layer in enumerate(self.layers):
292
+ if output_hidden_states:
293
+ encoder_states = encoder_states + (hidden_states,)
294
+ if self.gradient_checkpointing and self.training:
295
+ layer_outputs = torch.utils.checkpoint.checkpoint(
296
+ encoder_layer,
297
+ hidden_states)
298
+ else:
299
+ layer_outputs = encoder_layer(
300
+ hidden_states,
301
+ )
302
+ hidden_states = layer_outputs
303
+
304
+ if output_hidden_states:
305
+ encoder_states = encoder_states + (hidden_states,)
306
+
307
+ if not return_dict:
308
+ return tuple(v for v in [hidden_states, encoder_states] if v is not None)
309
+ return BaseModelOutput(
310
+ last_hidden_state=hidden_states, hidden_states=encoder_states
311
+ )
312
+
313
+
314
+ class InternVisionModel(PreTrainedModel):
315
+ main_input_name = 'pixel_values'
316
+ config_class = InternVisionConfig
317
+ _no_split_modules=["InternVisionEncoderLayer"]
318
+
319
+ def __init__(self, config: InternVisionConfig):
320
+ super().__init__(config)
321
+ self.config = config
322
+
323
+ self.embeddings = InternVisionEmbeddings(config)
324
+ self.encoder = InternVisionEncoder(config)
325
+
326
+ def resize_pos_embeddings(self, old_size, new_size, patch_size):
327
+ pos_emb = self.embeddings.position_embedding
328
+ _, num_positions, embed_dim = pos_emb.shape
329
+ cls_emb = pos_emb[:, :1, :]
330
+ pos_emb = pos_emb[:, 1:, :].reshape(1, old_size // patch_size, old_size // patch_size, -1).permute(0, 3, 1, 2)
331
+ pos_emb = F.interpolate(pos_emb.float(), size=new_size // patch_size, mode='bicubic', align_corners=False)
332
+ pos_emb = pos_emb.to(cls_emb.dtype).reshape(1, embed_dim, -1).permute(0, 2, 1)
333
+ pos_emb = torch.cat([cls_emb, pos_emb], dim=1)
334
+ self.embeddings.position_embedding = nn.Parameter(pos_emb)
335
+ logger.info('Resized position embeddings from {} to {}'.format(old_size, new_size))
336
+
337
+ def get_input_embeddings(self):
338
+ return self.embeddings
339
+
340
+ def forward(
341
+ self,
342
+ pixel_values: Optional[torch.FloatTensor] = None,
343
+ output_hidden_states: Optional[bool] = None,
344
+ return_dict: Optional[bool] = None,
345
+ pixel_embeds: Optional[torch.FloatTensor] = None,
346
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
347
+ output_hidden_states = (
348
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
349
+ )
350
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
351
+
352
+ if pixel_values is None and pixel_embeds is None:
353
+ raise ValueError('You have to specify pixel_values or pixel_embeds')
354
+
355
+ if pixel_embeds is not None:
356
+ hidden_states = pixel_embeds
357
+ else:
358
+ if len(pixel_values.shape) == 4:
359
+ hidden_states = self.embeddings(pixel_values)
360
+ else:
361
+ raise ValueError(f'wrong pixel_values size: {pixel_values.shape}')
362
+ encoder_outputs = self.encoder(
363
+ inputs_embeds=hidden_states,
364
+ output_hidden_states=output_hidden_states,
365
+ return_dict=return_dict,
366
+ )
367
+ last_hidden_state = encoder_outputs.last_hidden_state
368
+ pooled_output = last_hidden_state[:, 0, :]
369
+
370
+ if not return_dict:
371
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
372
+
373
+ return BaseModelOutputWithPooling(
374
+ last_hidden_state=last_hidden_state,
375
+ pooler_output=pooled_output,
376
+ hidden_states=encoder_outputs.hidden_states,
377
+ attentions=encoder_outputs.attentions,
378
+ )
379
+ def get_anyres_image_grid_shape(image_size, grid_pinpoints, patch_size):
380
+ """
381
+ Calculate the shape of the image patch grid after the preprocessing for images of any resolution.
382
+
383
+ Args:
384
+ image_size (`tuple`):
385
+ The size of the input image in the format (width, height).
386
+ grid_pinpoints (`List`):
387
+ A list containing possible resolutions. Each item in the list should be a tuple or list
388
+ of the form `(height, width)`.
389
+ patch_size (`int`):
390
+ The size of each image patch.
391
+
392
+ Returns:
393
+ tuple: The shape of the image patch grid in the format (width, height).
394
+ """
395
+ if not isinstance(grid_pinpoints, list):
396
+ raise TypeError("grid_pinpoints should be a list of tuples or lists")
397
+
398
+ # ! VERY IMPORTANT: if image_size is a tensor, it must be converted to a tuple, otherwise the calculation will be wrong
399
+ if not isinstance(image_size, (list, tuple)):
400
+ if not isinstance(image_size, (torch.Tensor, np.ndarray)):
401
+ raise TypeError(
402
+ f"image_size invalid type: {type(image_size)} not valid, should be either list, tuple, np.ndarray or tensor"
403
+ )
404
+ image_size = image_size.tolist()
405
+
406
+ height, width = select_best_resolution(image_size, grid_pinpoints)
407
+ return height // patch_size, width // patch_size
408
+
409
+
410
+ def image_size_to_num_patches(image_size, grid_pinpoints, patch_size: int):
411
+ """
412
+ Calculate the number of patches after the preprocessing for images of any resolution.
413
+
414
+ Args:
415
+ image_size (`torch.LongTensor` or `np.ndarray` or `Tuple[int, int]`):
416
+ The size of the input image in the format (height, width).
417
+ grid_pinpoints (`List`):
418
+ A list containing possible resolutions. Each item in the list should be a tuple or list
419
+ of the form `(height, width)`.
420
+ patch_size (`int`):
421
+ The size of each image patch.
422
+
423
+ Returns:
424
+ int: the number of patches
425
+ """
426
+ if not isinstance(grid_pinpoints, list):
427
+ raise TypeError("grid_pinpoints should be a list of tuples or lists")
428
+
429
+ # ! VERY IMPORTANT: if image_size is a tensor, it must be converted to a tuple, otherwise the calculation will be wrong
430
+ if not isinstance(image_size, (list, tuple)):
431
+ if not isinstance(image_size, (torch.Tensor, np.ndarray)):
432
+ raise TypeError(f"image_size invalid type {type(image_size)} with value {image_size}")
433
+ image_size = image_size.tolist()
434
+
435
+ best_resolution = select_best_resolution(image_size, grid_pinpoints)
436
+ height, width = best_resolution
437
+ num_patches = 0
438
+ # consider changing this to ceil(height/patch_size)*ceil(width/patch_size) + 1
439
+ for i in range(0, height, patch_size):
440
+ for j in range(0, width, patch_size):
441
+ num_patches += 1
442
+ # add the base patch
443
+ num_patches += 1
444
+ return num_patches
445
+
446
+
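A small worked example of the patch count above (illustrative only, not part of the uploaded file; the resolution and patch size are assumed values). If `select_best_resolution` picks an 896x448 canvas with a 448-pixel patch size, the grid contributes 2x1 patches plus the extra base patch:

```python
# Mirrors the loop in image_size_to_num_patches for an assumed configuration.
patch_size = 448
height, width = 896, 448          # hypothetical best_resolution (height, width)

num_patches = 0
for i in range(0, height, patch_size):
    for j in range(0, width, patch_size):
        num_patches += 1          # 2 * 1 grid patches
num_patches += 1                  # plus the base (overview) patch
print(num_patches)                # 3
```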
447
+ def unpad_image(tensor, original_size):
448
+ """
449
+ Unpads a PyTorch tensor of a padded and resized image.
450
+
451
+ Args:
452
+ tensor (`torch.Tensor`):
453
+ The image tensor, assumed to be of shape (num_channels, height, width).
454
+ original_size (`tuple`):
455
+ The original size of the image (height, width).
456
+
457
+ Returns:
458
+ `torch.Tensor`: The unpadded image tensor.
459
+ """
460
+ original_height, original_width = original_size
461
+ current_height, current_width = tensor.shape[1:]
462
+
463
+ original_aspect_ratio = original_width / original_height
464
+ current_aspect_ratio = current_width / current_height
465
+
466
+ if original_aspect_ratio > current_aspect_ratio:
467
+ scale_factor = current_width / original_width
468
+ new_height = int(original_height * scale_factor)
469
+ padding = (current_height - new_height) // 2
470
+ unpadded_tensor = tensor[:, padding : current_height - padding, :]
471
+ else:
472
+ scale_factor = current_height / original_height
473
+ new_width = int(original_width * scale_factor)
474
+ padding = (current_width - new_width) // 2
475
+ unpadded_tensor = tensor[:, :, padding : current_width - padding]
476
+
477
+ return unpadded_tensor
478
+
479
+
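To make the branch logic above concrete, here is a hedged sketch (not part of the uploaded file; the shapes are invented). A 672x1008 original squeezed onto a square 336x336 canvas is wider than it is tall, so the vertical padding rows are stripped:

```python
import torch

# scale_factor = 336 / 1008 = 1/3, new_height = 224, padding = (336 - 224) // 2 = 56
padded = torch.zeros(3, 336, 336)
unpadded = unpad_image(padded, original_size=(672, 1008))
print(unpadded.shape)  # torch.Size([3, 224, 336])
```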
480
+ @dataclass
481
+ # Copied from transformers.models.idefics.modeling_idefics.IdeficsCausalLMOutputWithPast with Idefics->OmChat
482
+ class OmChatCausalLMOutputWithPast(ModelOutput):
483
+ """
484
+ Base class for OmChat causal language model (or autoregressive) outputs.
485
+
486
+ Args:
487
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
488
+ Language modeling loss (for next-token prediction).
489
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
490
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
491
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
492
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
493
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`)
494
+
495
+ Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
496
+ `past_key_values` input) to speed up sequential decoding.
497
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
498
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
499
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
500
+
501
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
502
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
503
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
504
+ sequence_length)`.
505
+
506
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
507
+ heads.
508
+ image_hidden_states (`tuple(torch.FloatTensor)`, *optional*):
509
+ Tuple of `torch.FloatTensor` (one for the output of the image embeddings, `(batch_size, num_images,
510
+ sequence_length, hidden_size)`.
511
+
512
+ image_hidden_states of the model produced by the vision encoder, and optionally by the perceiver
513
+ """
514
+
515
+ loss: Optional[torch.FloatTensor] = None
516
+ logits: torch.FloatTensor = None
517
+ past_key_values: Optional[List[torch.FloatTensor]] = None
518
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
519
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
520
+ image_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
521
+
522
+
523
+ # Copied from transformers.models.llava.modeling_llava.LlavaMultiModalProjector with Llava->OmChat
524
+ class OmChatMultiModalProjector(nn.Module):
525
+ def __init__(self, config: OmChatConfig):
526
+ super().__init__()
527
+
528
+ self.linear_1 = nn.Linear(config.vision_config.hidden_size, config.text_config.hidden_size, bias=True)
529
+ self.act = nn.GELU()
530
+ self.linear_2 = nn.Linear(config.text_config.hidden_size, config.text_config.hidden_size, bias=True)
531
+
532
+ def forward(self, image_features):
533
+ hidden_states = self.linear_1(image_features)
534
+ hidden_states = self.act(hidden_states)
535
+ hidden_states = self.linear_2(hidden_states)
536
+ return hidden_states
537
+
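For reference, a minimal sketch of how the projector above is used (illustrative only, not part of the uploaded file; the hidden sizes are assumed stand-ins for the real config values):

```python
import torch
from types import SimpleNamespace

# Stand-in config with assumed widths; the real model reads these from OmChatConfig.
cfg = SimpleNamespace(
    vision_config=SimpleNamespace(hidden_size=1024),
    text_config=SimpleNamespace(hidden_size=3584),
)
proj = OmChatMultiModalProjector(cfg)
image_features = torch.randn(2, 576, 1024)   # [batch, num_patches, vision_dim]
print(proj(image_features).shape)            # torch.Size([2, 576, 3584])
```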
538
+ OMCHAT_START_DOCSTRING = r"""
539
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
540
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
541
+ etc.)
542
+
543
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
544
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
545
+ and behavior.
546
+
547
+ Parameters:
548
+ config ([`OmChatConfig`] or [`OmChatVisionConfig`]):
549
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
550
+ load the weights associated with the model, only the configuration. Check out the
551
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
552
+ """
553
+
554
+
555
+ @add_start_docstrings(
556
+ "The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
557
+ OMCHAT_START_DOCSTRING,
558
+ )
559
+ # Copied from transformers.models.llava.modeling_llava.LlavaPreTrainedModel with Llava->OmChat,llava->omchat
560
+ class OmChatPreTrainedModel(PreTrainedModel):
561
+ config_class = OmChatConfig
562
+ base_model_prefix = "model"
563
+ supports_gradient_checkpointing = True
564
+ _no_split_modules = ["OmChatVisionAttention"]
565
+ _skip_keys_device_placement = "past_key_values"
566
+ _supports_flash_attn_2 = True
567
+ _supports_cache_class = True
568
+
569
+ def _init_weights(self, module):
570
+ # important: this ported version of OmChat isn't meant for training from scratch - only
571
+ # inference and fine-tuning - so the proper init weights code has been removed - the original codebase
572
+ # https://github.com/haotian-liu/LLaVA/tree/main/omchat should serve for that purpose
573
+ std = (
574
+ self.config.initializer_range
575
+ if hasattr(self.config, "initializer_range")
576
+ else self.config.text_config.initializer_range
577
+ )
578
+
579
+ if hasattr(module, "class_embedding"):
580
+ module.class_embedding.data.normal_(mean=0.0, std=std)
581
+
582
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
583
+ module.weight.data.normal_(mean=0.0, std=std)
584
+ if module.bias is not None:
585
+ module.bias.data.zero_()
586
+ elif isinstance(module, nn.Embedding):
587
+ module.weight.data.normal_(mean=0.0, std=std)
588
+ if module.padding_idx is not None:
589
+ module.weight.data[module.padding_idx].zero_()
590
+
591
+ @property
592
+ def _supports_sdpa(self):
593
+ """
594
+ Retrieve language_model's attribute to check whether the model supports
595
+ SDPA or not.
596
+ """
597
+ return self.language_model._supports_sdpa
598
+
599
+
600
+ OMCHAT_INPUTS_DOCSTRING = r"""
601
+ Args:
602
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
603
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
604
+ it.
605
+
606
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
607
+ [`PreTrainedTokenizer.__call__`] for details.
608
+
609
+ [What are input IDs?](../glossary#input-ids)
610
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
611
+ The tensors corresponding to the input images. Pixel values can be obtained using
612
+ [`AutoImageProcessor`]. See [`OmChatImageProcessor.__call__`] for details. [`LlavaProcessor`] uses
613
+ [`OmChatImageProcessor`] for processing images.
614
+ image_sizes (`torch.LongTensor` of shape `(batch_size, 2)`, *optional*):
615
+ The sizes of the images in the batch, being (height, width) for each image.
616
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
617
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
618
+
619
+ - 1 for tokens that are **not masked**,
620
+ - 0 for tokens that are **masked**.
621
+
622
+ [What are attention masks?](../glossary#attention-mask)
623
+
624
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
625
+ [`PreTrainedTokenizer.__call__`] for details.
626
+
627
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
628
+ `past_key_values`).
629
+
630
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
631
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
632
+ information on the default strategy.
633
+
634
+ - 1 indicates the head is **not masked**,
635
+ - 0 indicates the head is **masked**.
636
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
637
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
638
+ config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
639
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
640
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
641
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
642
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
643
+
644
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
645
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
646
+
647
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
648
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
649
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
650
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
651
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
652
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
653
+ model's internal embedding lookup matrix.
654
+ vision_feature_layer (`int`, *optional*, defaults to -2):
655
+ The index of the layer to select the vision feature.
656
+ vision_feature_select_strategy (`str`, *optional*, defaults to `"default"`):
657
+ The feature selection strategy used to select the vision feature from the vision backbone.
658
+ Can be one of `"default"` or `"full"`. If `"default"`, the CLS token is removed from the vision features.
659
+ If `"full"`, the full vision features are used.
660
+ use_cache (`bool`, *optional*):
661
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
662
+ `past_key_values`).
663
+ output_attentions (`bool`, *optional*):
664
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
665
+ tensors for more detail.
666
+ output_hidden_states (`bool`, *optional*):
667
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
668
+ more detail.
669
+ return_dict (`bool`, *optional*):
670
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
671
+ """
672
+
673
+
674
+ @add_start_docstrings(
675
+ """The OmChat model which consists of a vision backbone and a language model.""",
676
+ OMCHAT_START_DOCSTRING,
677
+ )
678
+ class OmChatForConditionalGeneration(OmChatPreTrainedModel):
679
+ def __init__(self, config: OmChatConfig):
680
+ super().__init__(config)
681
+ self.vision_tower = InternVisionModel(InternVisionConfig())
682
+
683
+ self.multi_modal_projector = OmChatMultiModalProjector(config)
684
+ self.vocab_size = config.text_config.vocab_size
685
+ self.language_model = Qwen2ForCausalLM._from_config(
686
+ config.text_config, attn_implementation=config._attn_implementation
687
+ )
688
+ self.pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else -1
689
+ self._padding_side = "left" # set it to left by default, user can use setter to change padding_sides
690
+ self.post_init()
691
+
692
+ @property
693
+ def padding_side(self):
694
+ return self._padding_side
695
+
696
+ @padding_side.setter
697
+ def padding_side(self, padding_side: str):
698
+ if padding_side not in ["left", "right"]:
699
+ raise ValueError(f"{padding_side} is not `left` or `right`.")
700
+ self._padding_side = padding_side
701
+
702
+ # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration.get_input_embeddings
703
+ def get_input_embeddings(self):
704
+ return self.language_model.get_input_embeddings()
705
+
706
+ # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration.set_input_embeddings
707
+ def set_input_embeddings(self, value):
708
+ self.language_model.set_input_embeddings(value)
709
+
710
+ # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration.get_output_embeddings
711
+ def get_output_embeddings(self):
712
+ return self.language_model.get_output_embeddings()
713
+
714
+ # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration.set_output_embeddings
715
+ def set_output_embeddings(self, new_embeddings):
716
+ self.language_model.set_output_embeddings(new_embeddings)
717
+
718
+ # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration.set_decoder
719
+ def set_decoder(self, decoder):
720
+ self.language_model.set_decoder(decoder)
721
+
722
+ # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration.get_decoder
723
+ def get_decoder(self):
724
+ return self.language_model.get_decoder()
725
+
726
+ # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration.tie_weights
727
+ def tie_weights(self):
728
+ return self.language_model.tie_weights()
729
+
730
+ # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration.resize_token_embeddings
731
+ def resize_token_embeddings(self, new_num_tokens: Optional[int] = None, pad_to_multiple_of=None) -> nn.Embedding:
732
+ model_embeds = self.language_model.resize_token_embeddings(new_num_tokens, pad_to_multiple_of)
733
+ # update vocab size
734
+ self.config.text_config.vocab_size = model_embeds.num_embeddings
735
+ self.vocab_size = model_embeds.num_embeddings
736
+ return model_embeds
737
+
738
+ def get_vision_tower(self):
739
+ if isinstance(self.vision_tower, list):
740
+ return self.vision_tower[0]
741
+ return self.vision_tower
742
+
743
+ def get_model(self):
744
+ return self.language_model.model
745
+
746
+ def encode_images(self, images):
747
+ vision_tower = self.get_vision_tower()
748
+ image_features = self.vision_tower_forward(images)
749
+ return self.multi_modal_projector(image_features.to(torch.float16))
750
+
751
+ def feature_select(self, image_forward_outs):
752
+ image_features = image_forward_outs.hidden_states[-1]
753
+ image_features = image_features[:, 1:]
754
+ return image_features
755
+
756
+ def vision_tower_forward(self, images):
757
+ if type(images) is list:
758
+ image_features = []
759
+ for image in images:
760
+ image_forward_out = self.vision_tower(image.to(device=self.device, dtype=self.dtype).unsqueeze(0), output_hidden_states=True)
761
+ image_feature = self.feature_select(image_forward_out).to(image.dtype)
762
+ image_features.append(image_feature)
763
+ else:
764
+ image_forward_outs = self.vision_tower(images.to(device=self.device, dtype=torch.float16), output_hidden_states=True)
765
+ #image_forward_outs = self.vision_tower(images, output_hidden_states=True)
766
+ image_features = self.feature_select(image_forward_outs)
767
+
768
+ return image_features
769
+
770
+ def prepare_inputs_labels_for_multimodal(
771
+ self, input_ids, position_ids, attention_mask, past_key_values, labels, images
772
+ ):
773
+
774
+ vision_tower = self.get_vision_tower()
775
+ video_tower = self.get_vision_tower()
776
+ if (vision_tower is None and video_tower is None) or images is None or input_ids.shape[1] == 1:
777
+ if past_key_values is not None and (vision_tower is not None or video_tower is not None) and images is not None and input_ids.shape[1] == 1:
778
+ target_shape = past_key_values[-1][-1].shape[-2] + 1
779
+ attention_mask = torch.cat((attention_mask, torch.ones(
780
+ (attention_mask.shape[0], target_shape - attention_mask.shape[1]),
781
+ dtype=attention_mask.dtype,
782
+ device=attention_mask.device
783
+ )), dim=1)
784
+ position_ids = torch.sum(attention_mask, dim=1).unsqueeze(-1) - 1
785
+ return input_ids, position_ids, attention_mask, past_key_values, None, labels
786
+
787
+ image_idx = [idx for idx, img in enumerate(images) if img.ndim == 3]
788
+ is_all_image = len(image_idx) == len(images)
789
+ video_idx = [idx for idx, vid in enumerate(images) if vid.ndim == 4]
790
+ images_minibatch = torch.stack([images[idx] for idx in image_idx]) if len(image_idx) > 0 else [] # mini_b c h w
791
+ videos_minibatch = torch.stack([images[idx] for idx in video_idx]) if len(video_idx) > 0 else [] # mini_b c t h w
792
+
793
+ tmp_image_features = [None] * (len(image_idx) + len(video_idx))
794
+ if getattr(images_minibatch, 'ndim', 0) == 4: # batch consists of images, [mini_b, c, h, w]
795
+ if vision_tower is not None:
796
+ image_features_minibatch = self.encode_images(images_minibatch) # [mini_b, l, c]
797
+ else:
798
+ image_features_minibatch = torch.randn(1).to(self.device) # dummy feature for video-only training under tuning
799
+ for i, pos in enumerate(image_idx):
800
+ tmp_image_features[pos] = image_features_minibatch[i]
801
+ if getattr(videos_minibatch, 'ndim', 0) == 5: # batch consists of videos, [mini_b, c, t, h, w]
802
+ video_features_minibatch = self.encode_images(videos_minibatch) # fake list [mini_b, t, l, c]
803
+ for i, pos in enumerate(video_idx):
804
+ tmp_image_features[pos] = video_features_minibatch[i]
805
+ new_tmp = []
806
+ for image in tmp_image_features:
807
+ if isinstance(image, list):
808
+ t = len(image)
809
+ for i in range(t):
810
+ new_tmp.append(image[i])
811
+ else:
812
+ new_tmp.append(image)
813
+ image_features = new_tmp
814
+
815
+ if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):
816
+ raise NotImplementedError
817
+
818
+ _labels = labels
819
+ _position_ids = position_ids
820
+ _attention_mask = attention_mask
821
+ if attention_mask is None:
822
+ attention_mask = torch.ones_like(input_ids, dtype=torch.bool)
823
+ else:
824
+ attention_mask = attention_mask.bool()
825
+ if position_ids is None:
826
+ position_ids = torch.arange(0, input_ids.shape[1], dtype=torch.long, device=input_ids.device)
827
+ if labels is None:
828
+ labels = torch.full_like(input_ids, -100)
829
+
830
+ input_ids = [cur_input_ids[cur_attention_mask] for cur_input_ids, cur_attention_mask in zip(input_ids, attention_mask)]
831
+ labels = [cur_labels[cur_attention_mask] for cur_labels, cur_attention_mask in zip(labels, attention_mask)]
832
+ new_input_embeds = []
833
+ new_labels = []
834
+ cur_image_idx = 0
835
+ for batch_idx, cur_input_ids in enumerate(input_ids):
836
+ num_images = (cur_input_ids == -200).sum()
837
+ if num_images == 0:
838
+ cur_image_features = image_features[cur_image_idx]
839
+ cur_input_embeds_1 = self.get_model().embed_tokens(cur_input_ids)
840
+ cur_input_embeds = torch.cat([cur_input_embeds_1, cur_image_features[0:0]], dim=0)
841
+ new_input_embeds.append(cur_input_embeds)
842
+ new_labels.append(labels[batch_idx])
843
+ cur_image_idx += 1
844
+ continue
845
+
846
+ image_token_indices = [-1] + torch.where(cur_input_ids == -200)[0].tolist() + [cur_input_ids.shape[0]]
847
+ cur_input_ids_noim = []
848
+ cur_labels = labels[batch_idx]
849
+ cur_labels_noim = []
850
+ for i in range(len(image_token_indices) - 1):
851
+ cur_input_ids_noim.append(cur_input_ids[image_token_indices[i]+1:image_token_indices[i+1]])
852
+ cur_labels_noim.append(cur_labels[image_token_indices[i]+1:image_token_indices[i+1]])
853
+ split_sizes = [x.shape[0] for x in cur_labels_noim]
854
+ cur_input_embeds = self.get_model().embed_tokens(torch.cat(cur_input_ids_noim))
855
+ cur_input_embeds_no_im = torch.split(cur_input_embeds, split_sizes, dim=0)
856
+
857
+ cur_new_input_embeds = []
858
+ cur_new_labels = []
859
+
860
+ for i in range(num_images + 1):
861
+ cur_new_input_embeds.append(cur_input_embeds_no_im[i])
862
+ cur_new_labels.append(cur_labels_noim[i])
863
+ if i < num_images:
864
+ cur_image_features = image_features[cur_image_idx].to(self.device)
865
+ cur_image_idx += 1
866
+ cur_new_input_embeds.append(cur_image_features)
867
+ cur_new_labels.append(torch.full((cur_image_features.shape[0],), -100, device=cur_labels.device, dtype=cur_labels.dtype))
868
+
869
+ cur_new_input_embeds = torch.cat(cur_new_input_embeds)
870
+ cur_new_labels = torch.cat(cur_new_labels)
871
+
872
+ new_input_embeds.append(cur_new_input_embeds)
873
+ new_labels.append(cur_new_labels)
874
+
875
+ # Truncate sequences to max length as image embeddings can make the sequence longer
876
+ tokenizer_model_max_length = getattr(self.config, 'tokenizer_model_max_length', None)
877
+ if tokenizer_model_max_length is not None:
878
+ new_input_embeds = [x[:tokenizer_model_max_length] for x in new_input_embeds]
879
+ new_labels = [x[:tokenizer_model_max_length] for x in new_labels]
880
+
881
+ max_len = max(x.shape[0] for x in new_input_embeds)
882
+ batch_size = len(new_input_embeds)
883
+
884
+ new_input_embeds_padded = []
885
+ new_labels_padded = torch.full((batch_size, max_len), -100, dtype=new_labels[0].dtype, device=new_labels[0].device)
886
+ attention_mask = torch.zeros((batch_size, max_len), dtype=attention_mask.dtype, device=attention_mask.device)
887
+ position_ids = torch.zeros((batch_size, max_len), dtype=position_ids.dtype, device=position_ids.device)
888
+
889
+ for i, (cur_new_embed, cur_new_labels) in enumerate(zip(new_input_embeds, new_labels)):
890
+ cur_len = cur_new_embed.shape[0]
891
+ if getattr(self.config, 'tokenizer_padding_side', 'right') == "left":
892
+ new_input_embeds_padded.append(torch.cat((
893
+ torch.zeros((max_len - cur_len, cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device),
894
+ cur_new_embed
895
+ ), dim=0))
896
+ if cur_len > 0:
897
+ new_labels_padded[i, -cur_len:] = cur_new_labels
898
+ attention_mask[i, -cur_len:] = True
899
+ position_ids[i, -cur_len:] = torch.arange(0, cur_len, dtype=position_ids.dtype, device=position_ids.device)
900
+ else:
901
+ new_input_embeds_padded.append(torch.cat((
902
+ cur_new_embed,
903
+ torch.zeros((max_len - cur_len, cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)
904
+ ), dim=0))
905
+ if cur_len > 0:
906
+ new_labels_padded[i, :cur_len] = cur_new_labels
907
+ attention_mask[i, :cur_len] = True
908
+ position_ids[i, :cur_len] = torch.arange(0, cur_len, dtype=position_ids.dtype, device=position_ids.device)
909
+
910
+ new_input_embeds = torch.stack(new_input_embeds_padded, dim=0)
911
+ if _labels is None:
912
+ new_labels = None
913
+ else:
914
+ new_labels = new_labels_padded
915
+
916
+ if _attention_mask is None:
917
+ attention_mask = None
918
+ else:
919
+ attention_mask = attention_mask.to(dtype=_attention_mask.dtype)
920
+
921
+ if _position_ids is None:
922
+ position_ids = None
923
+
924
+ return None, position_ids, attention_mask, past_key_values, new_input_embeds, new_labels
925
+
926
+
927
+ def _merge_input_ids_with_image_features(
928
+ self,
929
+ image_features,
930
+ feature_lens,
931
+ inputs_embeds,
932
+ input_ids,
933
+ attention_mask,
934
+ position_ids=None,
935
+ labels=None,
936
+ image_token_index=None,
937
+ ignore_index=-100,
938
+ ):
939
+ """
940
+ Merge input_ids with image features into final embeddings.
941
+
942
+ Args:
943
+ image_features (`torch.Tensor` of shape `(all_feature_lens, embed_dim)`):
944
+ All vision vectors of all images in the batch
945
+ feature_lens (`torch.LongTensor` of shape `(num_images)`):
946
+ The length of visual embeddings of each image as stacked in `image_features`
947
+ inputs_embeds (`torch.Tensor` of shape `(batch_size, sequence_length, embed_dim)`):
948
+ Token embeddings before merging with visual embeddings
949
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
950
+ Input_ids of tokens, possibly filled with image token
951
+ attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
952
+ Mask to avoid performing attention on padding token indices.
953
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
954
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
955
+ config.n_positions - 1]`.
956
+ labels (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*)
957
+ Labels need to be recalculated to support training (if provided)
958
+ image_token_index (`int`, *optional*)
959
+ Token id used to indicate the special "image" token. Defaults to `config.image_token_index`
960
+ ignore_index (`int`, *optional*)
961
+ Value that is used to pad `labels` and will be ignored when calculating the loss. Default: -100.
962
+ Returns:
963
+ final_embedding, final_attention_mask, position_ids, final_labels
964
+
965
+ Explanation:
966
+ each image has variable length embeddings, with length specified by feature_lens
967
+ image_features is concatenation of all visual embed vectors
968
+ task: fill each <image> with the correct number of visual embeddings
969
+ Example:
970
+ X (5 patches), Y (3 patches), Z (8)
971
+ X, Y are in the same sequence (in-context learning)
972
+ if right padding
973
+ input_ids: [
974
+ a b c d e f X g h i j k Y l m
975
+ o p q r Z s t u v _ _ _ _ _ _
976
+ ]
977
+ input_ids should be: [
978
+ a b c d e f X X X X X g h i j k Y Y Y l m
979
+ o p q r Z Z Z Z Z Z Z Z s t u v _ _ _ _ _
980
+ ]
981
+ labels should be: [
982
+ a b c d e f _ _ _ _ _ g h i j k _ _ _ l m
983
+ o p q r _ _ _ _ _ _ _ _ s t u v _ _ _ _ _
984
+ ]
985
+ elif left padding
986
+ input_ids: [
987
+ a b c d e f X g h i j k Y l m
988
+ _ _ _ _ _ _ o p q r Z s t u v
989
+ ]
990
+ input_ids should be: [
991
+ a b c d e f X X X X X g h i j k Y Y Y l m
992
+ _ _ _ _ _ o p q r Z Z Z Z Z Z Z Z s t u v
993
+ ]
994
+ labels should be: [
995
+ a b c d e f _ _ _ _ _ g h i j k _ _ _ l m
996
+ _ _ _ _ _ o p q r _ _ _ _ _ _ _ _ s t u v
997
+ ]
998
+ Edge cases:
999
+ * If the tokens are the same but the image token sizes differ, left or right padding cannot be inferred
1000
+ ```python
1001
+ cat_img = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
1002
+ chart_img = Image.open(requests.get("https://github.com/haotian-liu/LLaVA/blob/1a91fc274d7c35a9b50b3cb29c4247ae5837ce39/images/llava_v1_5_radar.jpg?raw=true", stream=True).raw)
1003
+ prompts = [
1004
+ "[INST] <image>\nWhat is shown in this image? [/INST]",
1005
+ "[INST] <image>\nWhat is shown in this image? [/INST]",
1006
+ ]
1007
+ inputs = processor(prompts, [chart_img, cat_img], return_tensors='pt', padding=True).to("cuda")
1008
+ chart_img has 2634 tokens, while cat_img has 2340 tokens
1009
+ ```
1010
+
1011
+ input_ids: [
1012
+ a b c d X g h
1013
+ i j Y k l m n
1014
+ ]
1015
+ where X is 3 tokens while Y is 5; this means that after merging,
1016
+ if left-padding (batched generation)
1017
+ input_ids should be: [
1018
+ _ _ a b c d X X X g h
1019
+ i j Y Y Y Y Y k l m n
1020
+ ]
1021
+ elif (right padding) (training)
1022
+ input_ids should be: [
1023
+ a b c d X X X g h _ _
1024
+ i j Y Y Y Y Y k l m n
1025
+ ]
1026
+ """
1027
+ image_token_index = image_token_index if image_token_index is not None else self.config.image_token_index
1028
+ ignore_index = ignore_index if ignore_index is not None else self.config.ignore_index
1029
+
1030
+ with torch.no_grad():
1031
+ # ! in llava 1.6, number of patches is variable
1032
+ num_images = feature_lens.size(0)
1033
+ num_image_features, embed_dim = image_features.shape
1034
+ if feature_lens.sum() != num_image_features:
1035
+ raise ValueError(f"{feature_lens=} / {feature_lens.sum()} != {image_features.shape=}")
1036
+ batch_size = input_ids.shape[0]
1037
+ _left_padding = torch.any(attention_mask[:, 0] == 0)
1038
+ _right_padding = torch.any(attention_mask[:, -1] == 0)
1039
+
1040
+ left_padding = True if not self.training else False
1041
+ if batch_size > 1 and not self.training:
1042
+ if _left_padding and not _right_padding:
1043
+ left_padding = True
1044
+ elif not _left_padding and _right_padding:
1045
+ left_padding = False
1046
+ elif not _left_padding and not _right_padding:
1047
+ # both sides are 1, so we cannot tell
1048
+ left_padding = self.padding_side == "left"
1049
+ else:
1050
+ # invalid attention_mask
1051
+ raise ValueError(f"both side of attention_mask has zero, invalid. {attention_mask}")
1052
+
1053
+ # Whether to turn off right padding
1054
+ # 1. Create a mask to know where special image tokens are
1055
+ special_image_token_mask = input_ids == image_token_index
1056
+ # special_image_token_mask: [bsz, seqlen]
1057
+ num_special_image_tokens = torch.sum(special_image_token_mask, dim=-1)
1058
+ # num_special_image_tokens: [bsz]
1059
+ # Reserve for padding of num_images
1060
+ total_num_special_image_tokens = torch.sum(special_image_token_mask)
1061
+ if total_num_special_image_tokens != num_images:
1062
+ raise ValueError(
1063
+ f"Number of image tokens in input_ids ({total_num_special_image_tokens}) different from num_images ({num_images})."
1064
+ )
1065
+ # Compute the maximum embed dimension
1066
+ # max_image_feature_lens is max_feature_lens per batch
1067
+ feature_lens = feature_lens.to(input_ids.device)
1068
+ feature_lens_batch = feature_lens.split(num_special_image_tokens.tolist(), dim=0)
1069
+ feature_lens_batch_sum = torch.tensor([x.sum() for x in feature_lens_batch], device=input_ids.device)
1070
+ embed_sequence_lengths = (
1071
+ (attention_mask == 1).long().sum(-1) - num_special_image_tokens + feature_lens_batch_sum
1072
+ )
1073
+ max_embed_dim = embed_sequence_lengths.max()
1074
+
1075
+ batch_indices, non_image_indices = torch.where((input_ids != image_token_index) & (attention_mask == 1))
1076
+ # 2. Compute the positions where text should be written
1077
+ # Calculate new positions for text tokens in merged image-text sequence.
1078
+ # `special_image_token_mask` identifies image tokens. Each image token will be replaced by `nb_text_tokens_per_images` text tokens.
1079
+ # `torch.cumsum` computes how each image token shifts subsequent text token positions.
1080
+ # - 1 to adjust for zero-based indexing, as `cumsum` inherently increases indices by one.
1081
+ # ! instead of special_image_token_mask * (num_image_patches - 1)
1082
+ # special_image_token_mask * (num_feature_len - 1)
1083
+ special_image_token_mask = special_image_token_mask.long()
1084
+ special_image_token_mask[special_image_token_mask == 1] = feature_lens - 1
1085
+ new_token_positions = torch.cumsum((special_image_token_mask + 1), -1) - 1
1086
+ if left_padding:
1087
+ # shift right token positions so that they are ending at the same number
1088
+ # the below here was incorrect? new_token_positions += new_token_positions[:, -1].max() - new_token_positions[:, -1:]
1089
+ new_token_positions += max_embed_dim - 1 - new_token_positions[:, -1:]
1090
+
1091
+ text_to_overwrite = new_token_positions[batch_indices, non_image_indices]
1092
+
1093
+ # 3. Create the full embedding, already padded to the maximum position
1094
+ final_embedding = torch.zeros(
1095
+ batch_size, max_embed_dim, embed_dim, dtype=inputs_embeds.dtype, device=inputs_embeds.device
1096
+ )
1097
+ final_attention_mask = torch.zeros(
1098
+ batch_size, max_embed_dim, dtype=attention_mask.dtype, device=inputs_embeds.device
1099
+ )
1100
+ final_input_ids = torch.full(
1101
+ (batch_size, max_embed_dim), self.pad_token_id, dtype=input_ids.dtype, device=inputs_embeds.device
1102
+ )
1103
+ # In case the Vision model or the Language model has been offloaded to CPU, we need to manually
1104
+ # set the corresponding tensors into their correct target device.
1105
+ target_device = inputs_embeds.device
1106
+ batch_indices, non_image_indices, text_to_overwrite = (
1107
+ batch_indices.to(target_device),
1108
+ non_image_indices.to(target_device),
1109
+ text_to_overwrite.to(target_device),
1110
+ )
1111
+ attention_mask = attention_mask.to(target_device)
1112
+ input_ids = input_ids.to(target_device)
1113
+
1114
+ # 4. Fill the embeddings based on the mask. If we have ["hey" "<image>", "how", "are"]
1115
+ # we need to index copy on [0, 577, 578, 579] for the text and [1:576] for the image features
1116
+ final_embedding[batch_indices, text_to_overwrite] = inputs_embeds[batch_indices, non_image_indices]
1117
+ final_attention_mask[batch_indices, text_to_overwrite] = attention_mask[batch_indices, non_image_indices]
1118
+ final_input_ids[batch_indices, text_to_overwrite] = input_ids[batch_indices, non_image_indices]
1119
+ final_labels = None
1120
+ if labels is not None:
1121
+ labels = labels.to(target_device)
1122
+ final_labels = torch.full_like(final_attention_mask, ignore_index).to(torch.long)
1123
+ final_labels[batch_indices, text_to_overwrite] = labels[batch_indices, non_image_indices]
1124
+
1125
+ # 5. Fill the embeddings corresponding to the images. Anything that is not `text_positions` needs filling (#29835)
1126
+ with torch.no_grad():
1127
+ image_to_overwrite = torch.full(
1128
+ (batch_size, max_embed_dim), True, dtype=torch.bool, device=inputs_embeds.device
1129
+ )
1130
+ image_to_overwrite[batch_indices, text_to_overwrite] = False
1131
+ embed_indices = torch.arange(max_embed_dim).unsqueeze(0).to(target_device)
1132
+ embed_indices = embed_indices.expand(batch_size, max_embed_dim)
1133
+ embed_seq_lens = embed_sequence_lengths[:, None].to(target_device)
1134
+
1135
+ if left_padding:
1136
+ # exclude padding on the left
1137
+ max_embed_dim = max_embed_dim.to(target_device)
1138
+ val = (max_embed_dim - embed_indices) <= embed_seq_lens
1139
+ else:
1140
+ # exclude padding on the right
1141
+ val = embed_indices < embed_seq_lens
1142
+ image_to_overwrite &= val
1143
+
1144
+ if image_to_overwrite.sum() != num_image_features:
1145
+ raise ValueError(
1146
+ f"{image_to_overwrite.sum()=} != {num_image_features=} The input provided to the model are wrong. "
1147
+ f"The number of image tokens is {torch.sum(special_image_token_mask)} while"
1148
+ f" the number of image given to the model is {num_images}. "
1149
+ f"This prevents correct indexing and breaks batch generation."
1150
+ )
1151
+ final_embedding[image_to_overwrite] = image_features.contiguous().reshape(-1, embed_dim).to(target_device)
1152
+ final_attention_mask |= image_to_overwrite
1153
+ position_ids = (final_attention_mask.cumsum(-1) - 1).masked_fill_((final_attention_mask == 0), 1)
1154
+
1155
+ return final_embedding, final_attention_mask, position_ids, final_labels, final_input_ids
1156
+
1157
+ def pack_image_features(self, image_features, image_sizes, image_newline=None):
1158
+ """
1159
+ Reshape, unpad and then pack each image_feature into a single image_features tensor containing all visual vectors.
1160
+
1161
+ Args:
1162
+ image_features (`List[torch.Tensor]` of length num_images, each of shape `(num_patches, image_length, embed_dim)`)
1163
+ List of image feature tensors; each one contains all the visual features of that image's patches.
1164
+ image_sizes (`torch.Tensor` of shape `(num_images, 2)`)
1165
+ Actual size of each image as (height, width).
1166
+ image_newline (`torch.Tensor` of shape `(embed_dim)`)
1167
+ Newline embedding vector appended at the end of each row of image features.
1168
+ Returns:
1169
+ image_features (`torch.Tensor` of shape `(all_feat_len, embed_dim)`)
1170
+ feature_lens (`torch.LongTensor` of shape `(num_images,)`)
1171
+ Token length of each image in image_features.
1172
+ """
1173
+ new_image_features = []
1174
+ feature_lens = []
1175
+ for image_idx, image_feature in enumerate(image_features):
1176
+ if image_feature.shape[0] > 1:
1177
+ base_image_feature = image_feature[0]
1178
+ image_feature = image_feature[1:]
1179
+ height = width = self.config.vision_config.image_size // self.config.vision_config.patch_size
1180
+ if height * width != base_image_feature.shape[0]:
1181
+ raise ValueError("The number of patches is not consistent with the image size.")
1182
+ num_patch_height, num_patch_width = get_anyres_image_grid_shape(
1183
+ image_sizes[image_idx],
1184
+ self.config.image_grid_pinpoints,
1185
+ self.config.vision_config.image_size,
1186
+ )
1187
+ image_feature = image_feature.view(num_patch_height, num_patch_width, height, width, -1)
1188
+ image_feature = image_feature.permute(4, 0, 2, 1, 3).contiguous()
1189
+ image_feature = image_feature.flatten(1, 2).flatten(2, 3)
1190
+ image_feature = unpad_image(image_feature, image_sizes[image_idx])
1191
+ if image_newline is not None:
1192
+ image_feature = torch.cat(
1193
+ (
1194
+ image_feature,
1195
+ image_newline[:, None, None].expand(*image_feature.shape[:-1], 1).to(image_feature.dtype),
1196
+ ),
1197
+ dim=-1,
1198
+ )
1199
+ image_feature = image_feature.flatten(1, 2).transpose(0, 1)
1200
+ image_feature = torch.cat((base_image_feature, image_feature), dim=0)
1201
+ else:
1202
+ image_feature = image_feature[0]
1203
+ if image_newline is not None:
1204
+ image_feature = torch.cat((image_feature, image_newline[None].to(image_feature)), dim=0)
1205
+ new_image_features.append(image_feature)
1206
+ feature_lens.append(image_feature.size(0))
1207
+ image_features = torch.cat(new_image_features, dim=0)
1208
+ feature_lens = torch.tensor(feature_lens, dtype=torch.long, device=image_features.device)
1209
+ return image_features, feature_lens
1210
+
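+ # Shape sketch for pack_image_features (illustrative, assuming a 448x448 crop and the anyres grid
+ # pinpoints from preprocessor_config.json): an 896x896 input is tiled into a 2x2 grid of 448x448
+ # patches plus the base patch, so image_feature has shape (5, H*W, embed_dim). The base patch is
+ # kept first; the grid patches are re-tiled, unpadded to the original aspect ratio, optionally
+ # terminated per row with image_newline, and flattened, giving one (feature_len, embed_dim) block
+ # per image and its length in feature_lens.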
1211
+ @add_start_docstrings_to_model_forward(OMCHAT_INPUTS_DOCSTRING)
1212
+ @replace_return_docstrings(output_type=OmChatCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
1213
+ def forward(
1214
+ self,
1215
+ input_ids: torch.LongTensor = None,
1216
+ attention_mask: Optional[torch.Tensor] = None,
1217
+ position_ids: Optional[torch.LongTensor] = None,
1218
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1219
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1220
+ vision_feature_layer: Optional[int] = None,
1221
+ vision_feature_select_strategy: Optional[str] = None,
1222
+ labels: Optional[torch.LongTensor] = None,
1223
+ use_cache: Optional[bool] = None,
1224
+ output_attentions: Optional[bool] = None,
1225
+ output_hidden_states: Optional[bool] = None,
1226
+ images: Optional[torch.FloatTensor] = None,
1227
+ return_dict: Optional[bool] = None,
1228
+ ) -> Union[Tuple, OmChatCausalLMOutputWithPast]:
1229
+ r"""
1230
+ Args:
1231
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1232
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1233
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1234
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1235
+
1236
+ Returns:
1237
+
1238
+ Example:
1239
+
1240
+ ```python
1241
+ >>> from PIL import Image
1242
+ >>> import requests
1243
+ >>> from transformers import AutoProcessor, OmChatForConditionalGeneration
1244
+
1245
+ >>> model = OmChatForConditionalGeneration.from_pretrained("llava-hf/llava-v1.6-mistral-7b-hf")
1246
+ >>> processor = AutoProcessor.from_pretrained("llava-hf/llava-v1.6-mistral-7b-hf")
1247
+
1248
+ >>> prompt = "[INST] <image>\nWhat is shown in this image? [/INST]"
1249
+ >>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
1250
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1251
+
1252
+ >>> inputs = processor(text=prompt, images=image, return_tensors="pt")
1253
+
1254
+ >>> # Generate
1255
+ >>> generate_ids = model.generate(**inputs, max_length=30)
1256
+ >>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
1257
+ "[INST] \nWhat is shown in this image? [/INST] The image appears to be a radar chart, which is a type of multi-dimensional plot (...)"
1258
+ ```"""
1259
+
1260
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1261
+ output_hidden_states = (
1262
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1263
+ )
1264
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1265
+ vision_feature_layer = (
1266
+ vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer
1267
+ )
1268
+ vision_feature_select_strategy = (
1269
+ vision_feature_select_strategy
1270
+ if vision_feature_select_strategy is not None
1271
+ else self.config.vision_feature_select_strategy
1272
+ )
1273
+ if inputs_embeds is None:
1274
+ (
1275
+ input_ids,
1276
+ position_ids,
1277
+ attention_mask,
1278
+ past_key_values,
1279
+ inputs_embeds,
1280
+ labels
1281
+ ) = self.prepare_inputs_labels_for_multimodal(
1282
+ input_ids,
1283
+ position_ids,
1284
+ attention_mask,
1285
+ past_key_values,
1286
+ labels,
1287
+ images
1288
+ )
1289
+ outputs = self.language_model(
1290
+ input_ids=input_ids,
1291
+ attention_mask=attention_mask,
1292
+ position_ids=position_ids,
1293
+ past_key_values=past_key_values,
1294
+ inputs_embeds=inputs_embeds,
1295
+ use_cache=use_cache,
1296
+ output_attentions=output_attentions,
1297
+ output_hidden_states=output_hidden_states,
1298
+ return_dict=return_dict
1299
+ )
1300
+
1301
+ logits = outputs[0]
1302
+
1303
+ loss = None
1304
+ if labels is not None:
1305
+ # Shift so that tokens < n predict n
1306
+ if attention_mask is not None:
1307
+ shift_attention_mask = attention_mask[..., 1:]
1308
+ shift_logits = logits[..., :-1, :][shift_attention_mask.to(logits.device) != 0].contiguous()
1309
+ shift_labels = labels[..., 1:][shift_attention_mask.to(labels.device) != 0].contiguous()
1310
+ else:
1311
+ shift_logits = logits[..., :-1, :].contiguous()
1312
+ shift_labels = labels[..., 1:].contiguous()
1313
+ # Flatten the tokens
1314
+ loss_fct = nn.CrossEntropyLoss()
1315
+ loss = loss_fct(
1316
+ shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1).to(shift_logits.device)
1317
+ )
1318
+
1319
+ if not return_dict:
1320
+ output = (logits,) + outputs[1:]
1321
+ return (loss,) + output if loss is not None else output
1322
+ return OmChatCausalLMOutputWithPast(
1323
+ loss=loss,
1324
+ logits=logits,
1325
+ past_key_values=outputs.past_key_values,
1326
+ hidden_states=outputs.hidden_states,
1327
+ attentions=outputs.attentions,
1328
+ )
1329
+
1330
+ def prepare_inputs_for_generation(
1331
+ self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
1332
+ ):
1333
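+ # With a populated KV cache, only the most recently generated token needs to be fed back in;
+ # earlier positions are already represented by past_key_values.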
+ if past_key_values:
1334
+ input_ids = input_ids[:, -1:]
1335
+
1336
+ if inputs_embeds is not None and past_key_values is None:
1337
+ model_inputs = {"inputs_embeds": inputs_embeds}
1338
+ else:
1339
+ model_inputs = {"input_ids": input_ids}
1340
+
1341
+ model_inputs.update(
1342
+ {
1343
+ "past_key_values": past_key_values,
1344
+ "use_cache": kwargs.get("use_cache"),
1345
+ "attention_mask": attention_mask,
1346
+ "images": kwargs.get("images", None),
1347
+ }
1348
+ )
1349
+ return model_inputs
1350
+
1351
+
1352
+ # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration._reorder_cache
1353
+ def _reorder_cache(self, *args, **kwargs):
1354
+ return self.language_model._reorder_cache(*args, **kwargs)
preprocessor_config.json ADDED
@@ -0,0 +1,67 @@
1
+ {
2
+ "auto_map": {
3
+ "AutoProcessor": "processing_omchat.OmChatProcessor",
4
+ "AutoImageProcessor": "image_processing_omchat.OmChatImageProcessor"
5
+ },
6
+
7
+ "crop_size": {
8
+ "height": 448,
9
+ "width": 448
10
+ },
11
+ "do_center_crop": true,
12
+ "do_convert_rgb": true,
13
+ "do_normalize": true,
14
+ "do_rescale": true,
15
+ "do_resize": true,
16
+ "image_grid_pinpoints": [
17
+ [
18
+ 448,
19
+ 896
20
+ ],
21
+ [
22
+ 896,
23
+ 448
24
+ ],
25
+ [
26
+ 896,
27
+ 896
28
+ ],
29
+ [
30
+ 1344,
31
+ 448
32
+ ],
33
+ [
34
+ 448,
35
+ 1344
36
+ ],
37
+ [
38
+ 1344,
39
+ 896
40
+ ],
41
+ [
42
+ 896,
43
+ 1344
44
+ ],
45
+ [
46
+ 1344,
47
+ 1344
48
+ ]
49
+ ],
50
+ "image_mean": [
51
+ 0.485,
52
+ 0.456,
53
+ 0.406
54
+ ],
55
+ "image_processor_type": "OmChatImageProcessor",
56
+ "image_std": [
57
+ 0.229,
58
+ 0.224,
59
+ 0.225
60
+ ],
61
+ "processor_class": "OmChatProcessor",
62
+ "resample": 3,
63
+ "rescale_factor": 0.00392156862745098,
64
+ "size": {
65
+ "shortest_edge": 448
66
+ }
67
+ }
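The values above follow the standard rescale-then-normalize pipeline. A minimal sketch of how `rescale_factor`, `image_mean` and `image_std` are typically applied to a 448x448 crop (an illustration only, not the `OmChatImageProcessor` source; the grid pinpoints are the candidate resolutions used for anyres tiling):

```python
# Illustration of the rescale + normalize steps using the values from preprocessor_config.json.
import numpy as np

image = np.random.randint(0, 256, (448, 448, 3), dtype=np.uint8)  # stand-in for a 448x448 RGB crop

rescale_factor = 1 / 255                       # 0.00392156862745098, applied when do_rescale is true
image_mean = np.array([0.485, 0.456, 0.406])
image_std = np.array([0.229, 0.224, 0.225])

pixels = image.astype(np.float32) * rescale_factor    # do_rescale: bytes -> [0, 1]
pixels = (pixels - image_mean) / image_std             # do_normalize with ImageNet statistics
pixels = pixels.transpose(2, 0, 1)                     # HWC -> CHW for the vision tower
print(pixels.shape)                                    # (3, 448, 448)
```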
special_tokens_map.json ADDED
@@ -0,0 +1,20 @@
1
+ {
2
+ "additional_special_tokens": [
3
+ "<|im_start|>",
4
+ "<|im_end|>"
5
+ ],
6
+ "eos_token": {
7
+ "content": "<|endoftext|>",
8
+ "lstrip": false,
9
+ "normalized": false,
10
+ "rstrip": false,
11
+ "single_word": false
12
+ },
13
+ "pad_token": {
14
+ "content": "<|endoftext|>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false
19
+ }
20
+ }
tokenizer_config.json ADDED
@@ -0,0 +1,44 @@
1
+ {
2
+ "add_prefix_space": false,
3
+ "added_tokens_decoder": {
4
+ "151643": {
5
+ "content": "<|endoftext|>",
6
+ "lstrip": false,
7
+ "normalized": false,
8
+ "rstrip": false,
9
+ "single_word": false,
10
+ "special": true
11
+ },
12
+ "151644": {
13
+ "content": "<|im_start|>",
14
+ "lstrip": false,
15
+ "normalized": false,
16
+ "rstrip": false,
17
+ "single_word": false,
18
+ "special": true
19
+ },
20
+ "151645": {
21
+ "content": "<|im_end|>",
22
+ "lstrip": false,
23
+ "normalized": false,
24
+ "rstrip": false,
25
+ "single_word": false,
26
+ "special": true
27
+ }
28
+ },
29
+ "additional_special_tokens": [
30
+ "<|im_start|>",
31
+ "<|im_end|>"
32
+ ],
33
+ "bos_token": null,
34
+ "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
35
+ "clean_up_tokenization_spaces": false,
36
+ "eos_token": "<|endoftext|>",
37
+ "errors": "replace",
38
+ "model_max_length": 32768,
39
+ "pad_token": "<|endoftext|>",
40
+ "processor_class": "OmChatProcessor",
41
+ "split_special_tokens": false,
42
+ "tokenizer_class": "Qwen2Tokenizer",
43
+ "unk_token": null
44
+ }
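The `chat_template` above is standard ChatML with an implicit default system prompt. A minimal usage sketch (the checkpoint path below is a placeholder, not the actual repository name):

```python
# Render a prompt with the ChatML template defined in tokenizer_config.json.
# "path/to/omchat-checkpoint" is a placeholder; substitute the real model repository.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("path/to/omchat-checkpoint", trust_remote_code=True)
messages = [{"role": "user", "content": "Describe this image."}]
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
# <|im_start|>system
# You are a helpful assistant<|im_end|>
# <|im_start|>user
# Describe this image.<|im_end|>
# <|im_start|>assistant
```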