Upload modeling_mplug_owl2.py with huggingface_hub
modeling_mplug_owl2.py +409 -0
modeling_mplug_owl2.py
ADDED
@@ -0,0 +1,409 @@
# Copyright 2023 Haotian Liu & Qinghao Ye (Modified from LLaVA)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from abc import ABC, abstractmethod
from typing import List, Optional, Tuple, Union

import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss

import copy
import os
import sys

dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, dir_path)

from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, CLIPImageProcessor, LlamaConfig, LlamaModel, LlamaForCausalLM
from transformers.modeling_outputs import CausalLMOutputWithPast

from .configuration_mplug_owl2 import MPLUGOwl2Config, MplugOwlVisionConfig, MplugOwlVisualAbstractorConfig
from .visual_encoder import MplugOwlVisionModel, MplugOwlVisualAbstractorModel
from .modeling_llama2 import replace_llama_modality_adaptive
IGNORE_INDEX = -100
IMAGE_TOKEN_INDEX = -200
DEFAULT_IMAGE_TOKEN = "<|image|>"
from icecream import ic

def tokenizer_image_token(prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, return_tensors=None):
    prompt_chunks = [tokenizer(chunk).input_ids if len(chunk) > 0 else [] for chunk in prompt.split(DEFAULT_IMAGE_TOKEN)]

    def insert_separator(X, sep):
        return [ele for sublist in zip(X, [sep]*len(X)) for ele in sublist][:-1]

    input_ids = []
    offset = 0
    if len(prompt_chunks) > 0 and len(prompt_chunks[0]) > 0 and prompt_chunks[0][0] == tokenizer.bos_token_id:
        offset = 1
        input_ids.append(prompt_chunks[0][0])

    for x in insert_separator(prompt_chunks, [image_token_index] * (offset + 1)):
        input_ids.extend(x[offset:])

    if return_tensors is not None:
        if return_tensors == 'pt':
            return torch.tensor(input_ids, dtype=torch.long)
        raise ValueError(f'Unsupported tensor type: {return_tensors}')
    return input_ids

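# Illustrative sketch (the prompt string below is only an example): tokenizer_image_token
# splits the text on "<|image|>", tokenizes each chunk, and splices IMAGE_TOKEN_INDEX (-200)
# between the chunks, e.g.
#   ids = tokenizer_image_token("USER: <|image|>\nDescribe the image. ASSISTANT:", tokenizer, return_tensors='pt')
# returns a 1-D LongTensor whose -200 entries are later replaced by visual features in
# prepare_inputs_labels_for_multimodal.
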
def expand2square(pil_img, background_color):
    from PIL import Image
    width, height = pil_img.size
    if width == height:
        return pil_img
    elif width > height:
        result = Image.new(pil_img.mode, (width, width), background_color)
        result.paste(pil_img, (0, (width - height) // 2))
        return result
    else:
        result = Image.new(pil_img.mode, (height, height), background_color)
        result.paste(pil_img, ((height - width) // 2, 0))
        return result

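# Example (illustrative): expand2square(img, (122, 116, 104)) pads a 640x480 image onto a
# 640x640 canvas with the original pasted vertically centered, so the aspect ratio survives
# the square resize performed later by CLIPImageProcessor.
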
class MPLUGOwl2MetaModel:
    def __init__(self, config):
        super(MPLUGOwl2MetaModel, self).__init__(config)
        self.vision_model = MplugOwlVisionModel(
            MplugOwlVisionConfig(**config.visual_config["visual_model"])
        )
        self.visual_abstractor = MplugOwlVisualAbstractorModel(
            MplugOwlVisualAbstractorConfig(**config.visual_config["visual_abstractor"]), config.hidden_size
        )

    def get_vision_tower(self):
        vision_model = getattr(self, 'vision_model', None)
        if type(vision_model) is list:
            vision_model = vision_model[0]
        return vision_model

    def get_visual_abstractor(self):
        visual_abstractor = getattr(self, 'visual_abstractor', None)
        if type(visual_abstractor) is list:
            visual_abstractor = visual_abstractor[0]
        return visual_abstractor


class MPLUGOwl2MetaForCausalLM(ABC):
    @abstractmethod
    def get_model(self):
        pass

    def encode_images(self, images):
        image_features = self.get_model().vision_model(images).last_hidden_state
        image_features = self.get_model().visual_abstractor(encoder_hidden_states=image_features).last_hidden_state
        return image_features

    def prepare_inputs_labels_for_multimodal(
        self, input_ids, attention_mask, past_key_values, labels, images
    ):
        if images is None or input_ids.shape[1] == 1:
            if past_key_values is not None and images is not None and input_ids.shape[1] == 1:
                attention_mask = torch.ones((attention_mask.shape[0], past_key_values[-1][-1].shape[-2] + 1), dtype=attention_mask.dtype, device=attention_mask.device)
            multiway_indices = torch.zeros_like(input_ids).long().to(self.device)
            return input_ids, multiway_indices, attention_mask, past_key_values, None, labels

        if type(images) is list or images.ndim == 5:
            concat_images = torch.cat([image for image in images], dim=0)
            image_features = self.encode_images(concat_images)
            split_sizes = [image.shape[0] for image in images]
            image_features = torch.split(image_features, split_sizes, dim=0)
            image_features = [x.flatten(0, 1) for x in image_features]
        else:
            image_features = self.encode_images(images)

        new_input_embeds = []
        new_modality_indicators = []
        new_labels = [] if labels is not None else None
        cur_image_idx = 0
        for batch_idx, cur_input_ids in enumerate(input_ids):
            if (cur_input_ids == IMAGE_TOKEN_INDEX).sum() == 0:
                # multimodal LLM, but the current sample is not multimodal
                # FIXME: this is a hacky fix, for deepspeed zero3 to work
                half_len = cur_input_ids.shape[0] // 2
                cur_image_features = image_features[cur_image_idx]
                cur_input_embeds_1 = self.get_model().embed_tokens(cur_input_ids[:half_len])
                cur_input_embeds_2 = self.get_model().embed_tokens(cur_input_ids[half_len:])
                cur_input_embeds = torch.cat([cur_input_embeds_1, cur_image_features[0:0], cur_input_embeds_2], dim=0)
                new_input_embeds.append(cur_input_embeds)

                cur_modality_indicators = torch.zeros(len(cur_input_embeds)).long().to(self.device)
                new_modality_indicators.append(cur_modality_indicators)
                if labels is not None:
                    new_labels.append(labels[batch_idx])
                cur_image_idx += 1
                continue
            image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]
            cur_new_input_embeds = []
            cur_modality_indicators = []
            if labels is not None:
                cur_labels = labels[batch_idx]
                cur_new_labels = []
                assert cur_labels.shape == cur_input_ids.shape
            while image_token_indices.numel() > 0:
                cur_image_features = image_features[cur_image_idx]
                image_token_start = image_token_indices[0]
                cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start]))
                cur_new_input_embeds.append(cur_image_features)

                # Add modality indicator
                assert image_token_start == len(cur_input_ids[:image_token_start])
                cur_modality_indicators.append(torch.zeros(len(cur_input_ids[:image_token_start])).long())
                cur_modality_indicators.append(torch.ones(len(cur_image_features)).long())

                if labels is not None:
                    cur_new_labels.append(cur_labels[:image_token_start])
                    cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))
                    cur_labels = cur_labels[image_token_start+1:]
                cur_image_idx += 1
                cur_input_ids = cur_input_ids[image_token_start+1:]
                image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]
            if cur_input_ids.numel() > 0:
                cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids))
                cur_modality_indicators.append(torch.zeros(len(cur_input_ids)).long())
                if labels is not None:
                    cur_new_labels.append(cur_labels)
            cur_new_input_embeds = [x.to(device=self.device) for x in cur_new_input_embeds]
            cur_new_input_embeds = torch.cat(cur_new_input_embeds, dim=0)
            new_input_embeds.append(cur_new_input_embeds)

            # Modality
            cur_modality_indicators = [x.to(device=self.device) for x in cur_modality_indicators]
            cur_modality_indicators = torch.cat(cur_modality_indicators, dim=0)
            new_modality_indicators.append(cur_modality_indicators)

            if labels is not None:
                cur_new_labels = torch.cat(cur_new_labels, dim=0)
                new_labels.append(cur_new_labels)

        if any(x.shape != new_input_embeds[0].shape for x in new_input_embeds):
            max_len = max(x.shape[0] for x in new_input_embeds)

            # Embedding
            new_input_embeds_align = []
            for cur_new_embed in new_input_embeds:
                cur_new_embed = torch.cat((cur_new_embed, torch.zeros((max_len - cur_new_embed.shape[0], cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)), dim=0)
                new_input_embeds_align.append(cur_new_embed)
            new_input_embeds = torch.stack(new_input_embeds_align, dim=0)

            # Modality
            new_modality_indicators_align = []
            for cur_modality_indicator in new_modality_indicators:
                cur_new_embed = torch.cat((cur_modality_indicator, torch.zeros(max_len - cur_modality_indicator.shape[0], dtype=cur_modality_indicator.dtype, device=cur_modality_indicator.device)), dim=0)
                new_modality_indicators_align.append(cur_new_embed)
            new_modality_indicators = torch.stack(new_modality_indicators_align, dim=0)

            # Label
            if labels is not None:
                new_labels_align = []
                _new_labels = new_labels
                for cur_new_label in new_labels:
                    cur_new_label = torch.cat((cur_new_label, torch.full((max_len - cur_new_label.shape[0],), IGNORE_INDEX, dtype=cur_new_label.dtype, device=cur_new_label.device)), dim=0)
                    new_labels_align.append(cur_new_label)
                new_labels = torch.stack(new_labels_align, dim=0)

            # Attention Mask
            if attention_mask is not None:
                new_attention_mask = []
                for cur_attention_mask, cur_new_labels, cur_new_labels_align in zip(attention_mask, _new_labels, new_labels):
                    new_attn_mask_pad_left = torch.full((cur_new_labels.shape[0] - labels.shape[1],), True, dtype=attention_mask.dtype, device=attention_mask.device)
                    new_attn_mask_pad_right = torch.full((cur_new_labels_align.shape[0] - cur_new_labels.shape[0],), False, dtype=attention_mask.dtype, device=attention_mask.device)
                    cur_new_attention_mask = torch.cat((new_attn_mask_pad_left, cur_attention_mask, new_attn_mask_pad_right), dim=0)
                    new_attention_mask.append(cur_new_attention_mask)
                attention_mask = torch.stack(new_attention_mask, dim=0)
                assert attention_mask.shape == new_labels.shape
        else:
            new_input_embeds = torch.stack(new_input_embeds, dim=0)
            new_modality_indicators = torch.stack(new_modality_indicators, dim=0)
            if labels is not None:
                new_labels = torch.stack(new_labels, dim=0)

            if attention_mask is not None:
                new_attn_mask_pad_left = torch.full((attention_mask.shape[0], new_input_embeds.shape[1] - input_ids.shape[1]), True, dtype=attention_mask.dtype, device=attention_mask.device)
                attention_mask = torch.cat((new_attn_mask_pad_left, attention_mask), dim=1)
                assert attention_mask.shape == new_input_embeds.shape[:2]
        return None, new_modality_indicators, attention_mask, past_key_values, new_input_embeds, new_labels
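    # The method above returns a 6-tuple: (input_ids, modality_indicators, attention_mask,
    # past_key_values, inputs_embeds, labels). On the multimodal path input_ids comes back
    # as None and inputs_embeds carries the token embeddings with every IMAGE_TOKEN_INDEX
    # placeholder replaced by its visual-abstractor features; modality_indicators is 0 for
    # text positions and 1 for image positions, which the modality-adaptive LLaMA layers
    # (see replace_llama_modality_adaptive) consume.

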
class MPLUGOwl2LlamaModel(MPLUGOwl2MetaModel, LlamaModel):
    config_class = MPLUGOwl2Config

    def __init__(self, config: MPLUGOwl2Config):
        super(MPLUGOwl2LlamaModel, self).__init__(config)


class MPLUGOwl2LlamaForCausalLM(LlamaForCausalLM, MPLUGOwl2MetaForCausalLM):
    config_class = MPLUGOwl2Config

    def __init__(self, config):
        super(LlamaForCausalLM, self).__init__(config)
        self.model = MPLUGOwl2LlamaModel(config)

        self.tokenizer = AutoTokenizer.from_pretrained("q-future/one-align")
        self.image_processor = CLIPImageProcessor.from_pretrained("q-future/one-align")

        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.preferential_ids_ = [id_[1] for id_ in self.tokenizer(["excellent", "good", "fair", "poor", "bad"])["input_ids"]]

        # Initialize weights and apply final processing
        self.post_init()

    def get_model(self):
        return self.model

    def chat(self, prompt: str, images, **generate_kwargs):
        # NOTE: this helper only builds the multimodal inputs for the given prompt and
        # images; it does not call generate().
        input_ids = tokenizer_image_token(prompt, self.tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).to(self.device)
        images = [expand2square(img, tuple(int(x*255) for x in self.image_processor.image_mean)) for img in images]
        image_tensor = self.image_processor.preprocess(images, return_tensors="pt")["pixel_values"].half().to(self.device)

        return
    def score(self, images,
              task_: str = "quality",
              input_: str = "image",
              ):
        if not hasattr(self, "weight_tensor"):
            self.weight_tensor = torch.Tensor([5., 4., 3., 2., 1.]).half().to(self.device)
        prompt = "USER: How would you rate the {} of this {}?\n<|image|>\nASSISTANT: The {} of the {} is".format(task_, input_, task_, input_)
        if input_ == "image":
            images = [expand2square(img, tuple(int(x*255) for x in self.image_processor.image_mean)) for img in images]
            input_ids = tokenizer_image_token(prompt, self.tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).to(self.device)
            with torch.inference_mode():
                image_tensor = self.image_processor.preprocess(images, return_tensors="pt")["pixel_values"].half().to(self.device)
                output_logits = self(input_ids.repeat(image_tensor.shape[0], 1),
                                     images=image_tensor)["logits"][:, -1, self.preferential_ids_]
                return torch.softmax(output_logits, -1) @ self.weight_tensor
        else:
            video = [[expand2square(frame, tuple(int(x*255) for x in self.image_processor.image_mean)) for frame in vid] for vid in images]
            input_ids = tokenizer_image_token(prompt, self.tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).to(self.device)
            with torch.inference_mode():
                video_tensors = [self.image_processor.preprocess(vid, return_tensors="pt")["pixel_values"].half().to(self.model.device) for vid in video]
                output_logits = self(input_ids.repeat(len(video_tensors), 1),
                                     images=video_tensors)["logits"][:, -1, self.preferential_ids_]
                return torch.softmax(output_logits, -1) @ self.weight_tensor

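    # Illustrative usage sketch, assuming the weights published at "q-future/one-align"
    # and local image files (the file names here are placeholders):
    #
    #   from PIL import Image
    #   model = AutoModelForCausalLM.from_pretrained(
    #       "q-future/one-align", trust_remote_code=True,
    #       torch_dtype=torch.float16, device_map="auto")
    #   scores = model.score([Image.open("example_a.jpg"), Image.open("example_b.jpg")],
    #                        task_="quality", input_="image")
    #
    # `scores` is a tensor of per-image ratings on a 1-5 scale: the softmax over the
    # {excellent, good, fair, poor, bad} token logits weighted by [5, 4, 3, 2, 1].
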
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        # modality_indicators: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        images: Optional[torch.FloatTensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        input_ids, modality_indicators, attention_mask, past_key_values, inputs_embeds, labels = \
            self.prepare_inputs_labels_for_multimodal(input_ids, attention_mask, past_key_values, labels, images)

        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model(
            input_ids=input_ids,
            modality_indicators=modality_indicators,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict
        )

        hidden_states = outputs[0]
        logits = self.lm_head(hidden_states)

        loss = None
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            shift_logits = shift_logits.view(-1, self.config.vocab_size)
            shift_labels = shift_labels.view(-1)
            # Enable model/pipeline parallelism
            shift_labels = shift_labels.to(shift_logits.device)
            loss = loss_fct(shift_logits, shift_labels)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def prepare_inputs_for_generation(
        self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
    ):
        if past_key_values:
            input_ids = input_ids[:, -1:]

        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            model_inputs = {"input_ids": input_ids}

        model_inputs.update(
            {
                "past_key_values": past_key_values,
                "use_cache": kwargs.get("use_cache"),
                "attention_mask": attention_mask,
                "images": kwargs.get("images", None),
            }
        )
        return model_inputs

AutoConfig.register("mplug_owl2", MPLUGOwl2Config)
AutoModelForCausalLM.register(MPLUGOwl2Config, MPLUGOwl2LlamaForCausalLM)

replace_llama_modality_adaptive()

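# Illustrative note: after the two register() calls above, any config whose model_type is
# "mplug_owl2" resolves to MPLUGOwl2LlamaForCausalLM, e.g. (sketch, assuming the
# "q-future/one-align" repository):
#
#   cfg = AutoConfig.from_pretrained("q-future/one-align", trust_remote_code=True)
#   model = AutoModelForCausalLM.from_config(cfg)  # randomly initialised weights
#
# replace_llama_modality_adaptive() patches the LLaMA layers (see modeling_llama2) so that
# the self.model(...) call in forward() accepts the modality_indicators argument.
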
if __name__ == "__main__":
    config = MPLUGOwl2Config.from_pretrained('q-future/one-align')
    from icecream import ic
    # config = MPLUGOwl2Config()
    model = AutoModelForCausalLM.from_config(config)

    # Smoke test: two random images and one sequence containing two image placeholders.
    images = torch.randn(2, 3, 448, 448)
    input_ids = torch.cat([
        torch.ones(8).long(), torch.tensor([IMAGE_TOKEN_INDEX]*1).long(), torch.ones(8).long(), torch.tensor([IMAGE_TOKEN_INDEX]*1).long(), torch.ones(8).long()
    ], dim=0).unsqueeze(0)
    labels = input_ids.clone()
    labels[labels < 0] = -100

    # image_feature = model.encode_images(images)
    # ic(image_feature.shape)

    output = model(images=images, input_ids=input_ids, labels=labels)
    ic(output.loss)
    ic(output.logits.shape)

    model.save_pretrained('/cpfs01/shared/public/test/tmp_owl')