iakarshu committed
Commit 1828dbd · 1 Parent(s): dcf7eb6

Upload modeling.py

Files changed (1):
  1. modeling.py +251 -0

modeling.py ADDED
@@ -0,0 +1,251 @@
import torch
import torch.nn as nn
import pytorch_lightning as pl
from transformers import T5ForConditionalGeneration, ViTModel

# Defining the PyTorch model

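# Illustrative sketch, not in the original file: an example of the `config`
# dict this module expects, inferred from the keys indexed below. The concrete
# values are assumptions (t5-base / vit-base defaults), not taken from the
# original repository.
example_config = {
    'vocab_size': 32128,                 # t5-base vocabulary (embedding) size
    't5_model': 't5-base',               # Hugging Face checkpoint name
    'hidden_state': 768,                 # hidden size of t5-base and vit-base
    'max_2d_position_embeddings': 1001,  # range of the integer box coordinates
    'classes': 32128,                    # size of the answer vocabulary
    'seq_len': 512,                      # answer-sequence length kept by the head
}
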
class LaTr_for_pretraining(nn.Module):
    def __init__(self, config, classify=False):

        super(LaTr_for_pretraining, self).__init__()
        self.vocab_size = config['vocab_size']

        model = T5ForConditionalGeneration.from_pretrained(config['t5_model'])
        # Removing the embedding layer from the encoder and the decoder
        dummy_encoder = list(nn.Sequential(
            *list(model.encoder.children())[1:]).children())
        dummy_decoder = list(nn.Sequential(
            *list(model.decoder.children())[1:]).children())

        # Using the T5 encoder and decoder blocks
        self.list_encoder = nn.Sequential(*list(dummy_encoder[0]))
        self.residue_encoder = nn.Sequential(*list(dummy_encoder[1:]))
        self.list_decoder = nn.Sequential(*list(dummy_decoder[0]))
        self.residue_decoder = nn.Sequential(*list(dummy_decoder[1:]))

        # We use the T5 embeddings for encoding the tokenized words
        self.language_emb = nn.Embedding.from_pretrained(model.shared.weight)

        # Box-coordinate embeddings, one table per channel of the
        # (top-left x, top-left y, bottom-right x, bottom-right y, width, height) layout
        self.top_left_x = nn.Embedding(
            config['max_2d_position_embeddings'], config['hidden_state'])
        self.bottom_right_x = nn.Embedding(
            config['max_2d_position_embeddings'], config['hidden_state'])
        self.top_left_y = nn.Embedding(
            config['max_2d_position_embeddings'], config['hidden_state'])
        self.bottom_right_y = nn.Embedding(
            config['max_2d_position_embeddings'], config['hidden_state'])
        self.width_emb = nn.Embedding(
            config['max_2d_position_embeddings'], config['hidden_state'])
        self.height_emb = nn.Embedding(
            config['max_2d_position_embeddings'], config['hidden_state'])

        self.classify = classify
        self.classification_layer = nn.Linear(
            config['hidden_state'], config['classes'])

    def forward(self, tokens, coordinates, predict_proba=False, predict_class=False):

        embedded_feature = self.language_emb(tokens)

        top_left_x_feat = self.top_left_x(coordinates[:, :, 0])
        top_left_y_feat = self.top_left_y(coordinates[:, :, 1])
        bottom_right_x_feat = self.bottom_right_x(coordinates[:, :, 2])
        bottom_right_y_feat = self.bottom_right_y(coordinates[:, :, 3])
        width_feat = self.width_emb(coordinates[:, :, 4])
        height_feat = self.height_emb(coordinates[:, :, 5])

        total_feat = embedded_feature + top_left_x_feat + top_left_y_feat + \
            bottom_right_x_feat + bottom_right_y_feat + width_feat + height_feat

        # Passing the summed features through the T5 encoder and decoder stacks
        for layer in self.list_encoder:
            total_feat = layer(total_feat)[0]
        total_feat = self.residue_encoder(total_feat)

        for layer in self.list_decoder:
            total_feat = layer(total_feat)[0]
        total_feat = self.residue_decoder(total_feat)

        if self.classify:
            total_feat = self.classification_layer(total_feat)

        if predict_proba:
            return total_feat.softmax(dim=-1)

        if predict_class:
            return total_feat.argmax(dim=-1)

        return total_feat

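# Illustrative usage sketch, not in the original file, showing the input
# shapes this model consumes: `tokens` is a (batch, seq) tensor of token ids
# and `coordinates` is a (batch, seq, 6) integer tensor in
# [x_top_left, y_top_left, x_bottom_right, y_bottom_right, width, height] order.
# pretrain_model = LaTr_for_pretraining(example_config, classify=True)
# tokens = torch.randint(0, example_config['vocab_size'], (2, 128))
# coordinates = torch.randint(
#     0, example_config['max_2d_position_embeddings'], (2, 128, 6))
# class_ids = pretrain_model(tokens, coordinates, predict_class=True)
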
class LaTr_for_finetuning(nn.Module):
    def __init__(self, config, address_to_pre_trained_weights=None):
        super(LaTr_for_finetuning, self).__init__()

        self.config = config
        self.vocab_size = config['vocab_size']

        self.pre_training_model = LaTr_for_pretraining(config)
        if address_to_pre_trained_weights is not None:
            self.pre_training_model.load_state_dict(
                torch.load(address_to_pre_trained_weights))
        self.vit = ViTModel.from_pretrained(
            "google/vit-base-patch16-224-in21k")

        # In the fine-tuning stage, all ViT layers except the last were frozen
        # (note that the freezing itself is not performed in this constructor)

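        # Illustrative sketch, not in the original file: one way to apply the
        # freezing described above. The 'encoder.layer.11' prefix assumes the
        # 12-block vit-base checkpoint loaded above.
        # for name, param in self.vit.named_parameters():
        #     if not name.startswith('encoder.layer.11'):
        #         param.requires_grad = False
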
        self.classification_head = nn.Linear(
            config['hidden_state'], config['classes'])

    def forward(self, lang_vect, spatial_vect, quest_vect, img_vect):

        # The block below computes the combined language and spatial features
        embedded_feature = self.pre_training_model.language_emb(lang_vect)
        top_left_x_feat = self.pre_training_model.top_left_x(
            spatial_vect[:, :, 0])
        top_left_y_feat = self.pre_training_model.top_left_y(
            spatial_vect[:, :, 1])
        bottom_right_x_feat = self.pre_training_model.bottom_right_x(
            spatial_vect[:, :, 2])
        bottom_right_y_feat = self.pre_training_model.bottom_right_y(
            spatial_vect[:, :, 3])
        width_feat = self.pre_training_model.width_emb(spatial_vect[:, :, 4])
        height_feat = self.pre_training_model.height_emb(spatial_vect[:, :, 5])

        spatial_lang_feat = embedded_feature + top_left_x_feat + top_left_y_feat + \
            bottom_right_x_feat + bottom_right_y_feat + width_feat + height_feat

        # Extracting the image features with the Vision Transformer
        img_feat = self.vit(img_vect).last_hidden_state

        # Embedding the question tokens
        quest_feat = self.pre_training_model.language_emb(quest_vect)

        # Concatenating the three features before the T5 transformer
        final_feat = torch.cat(
            [img_feat, spatial_lang_feat, quest_feat], dim=-2)

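        # Shape sketch (an illustration assuming vit-base with hidden size 768;
        # not in the original file): img_feat is (B, 197, 768),
        # spatial_lang_feat is (B, T_ocr, 768) and quest_feat is (B, T_q, 768),
        # so final_feat is (B, 197 + T_ocr + T_q, 768); the head below keeps
        # only the first config['seq_len'] positions of that sequence.
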
        # Passing the concatenated features through the T5 encoder and decoder
        for layer in self.pre_training_model.list_encoder:
            final_feat = layer(final_feat)[0]

        final_feat = self.pre_training_model.residue_encoder(final_feat)

        for layer in self.pre_training_model.list_decoder:
            final_feat = layer(final_feat)[0]
        final_feat = self.pre_training_model.residue_decoder(final_feat)

        answer_vector = self.classification_head(
            final_feat)[:, :self.config['seq_len'], :]

        return answer_vector

def polynomial(base_lr, current_step, max_iter=1e5, power=1):
    # Polynomial (here linear, since power=1) learning-rate decay
    return base_lr * ((1 - float(current_step) / max_iter) ** power)

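# Worked example, not in the original file: with base_lr=1e-4 and
# max_iter=50000, polynomial(1e-4, 0) == 1e-4, polynomial(1e-4, 25000) == 5e-5,
# and polynomial(1e-4, 50000) == 0.0, i.e. a straight line down to zero.
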
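# Hypothetical helper, not in the original file: `calculate_acc_score` is
# called in `calculate_metrics` below but never defined or imported here.
# A minimal sketch, assuming it is meant to be token-level accuracy that
# ignores the pad index 0 also used by the loss:
def calculate_acc_score(pred, gt):
    mask = gt != 0                  # ignore padded positions
    if mask.sum() == 0:
        return 0.0
    return float((pred[mask] == gt[mask]).float().mean())
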
class LaTrForVQA(pl.LightningModule):
    def __init__(self, config, learning_rate=1e-4, max_steps=100000 // 2):
        super(LaTrForVQA, self).__init__()

        self.config = config
        self.save_hyperparameters()
        self.latr = LaTr_for_finetuning(config)
        self.training_losses = []
        self.validation_losses = []
        self.max_steps = max_steps

    def configure_optimizers(self):
        return torch.optim.AdamW(self.parameters(), lr=self.hparams['learning_rate'])

    def forward(self, batch_dict):
        boxes = batch_dict['boxes']
        img = batch_dict['img']
        question = batch_dict['question']
        words = batch_dict['tokenized_words']
        answer_vector = self.latr(lang_vect=words,
                                  spatial_vect=boxes,
                                  img_vect=img,
                                  quest_vect=question)
        return answer_vector

    def calculate_metrics(self, prediction, labels):

        # Accuracy between the predictions and ground-truth labels of a batch,
        # taking the padded positions of the sequence into account
        batch_size = len(prediction)
        ac_score = 0

        for (pred, gt) in zip(prediction, labels):
            ac_score += calculate_acc_score(pred.detach().cpu(),
                                            gt.detach().cpu())
        ac_score = ac_score / batch_size
        return ac_score

    def training_step(self, batch, batch_idx):
        answer_vector = self.forward(batch)

        # https://discuss.huggingface.co/t/bertformaskedlm-s-loss-and-scores-how-the-loss-is-computed/607/2
        loss = nn.CrossEntropyLoss(ignore_index=0)(
            answer_vector.reshape(-1, self.config['classes']), batch['answer'].reshape(-1))
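        # Shape note (an illustration, not in the original file): answer_vector
        # is (B, seq_len, classes), so the loss sees (B * seq_len, classes)
        # logits against (B * seq_len,) labels, with the pad id 0 excluded
        # via ignore_index.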
        _, preds = torch.max(answer_vector, dim=-1)

        # Calculating the accuracy score
        train_acc = self.calculate_metrics(preds, batch['answer'])
        train_acc = torch.tensor(train_acc)

        # Logging
        self.log('train_ce_loss', loss, prog_bar=True)
        self.log('train_acc', train_acc, prog_bar=True)
        self.training_losses.append(loss.item())

        return loss

    def validation_step(self, batch, batch_idx):
        logits = self.forward(batch)
        loss = nn.CrossEntropyLoss(ignore_index=0)(
            logits.reshape(-1, self.config['classes']), batch['answer'].reshape(-1))
        _, preds = torch.max(logits, dim=-1)

        # Validation accuracy
        val_acc = self.calculate_metrics(preds.cpu(), batch['answer'].cpu())
        val_acc = torch.tensor(val_acc)

        # Logging
        self.log('val_ce_loss', loss, prog_bar=True)
        self.log('val_acc', val_acc, prog_bar=True)
        self.validation_losses.append(loss.item())
        return {'val_loss': loss, 'val_acc': val_acc}

    def optimizer_step(self, epoch_nb, batch_nb, optimizer, optimizer_i, opt_closure=None, on_tpu=False,
                       using_native_amp=False, using_lbfgs=False):
        # Note: this overrides the optimizer_step hook of pre-2.0 PyTorch Lightning

        # Warmup for 1000 steps
        if self.trainer.global_step < 1000:
            lr_scale = min(1., float(self.trainer.global_step + 1) / 1000.)
            for pg in optimizer.param_groups:
                pg['lr'] = lr_scale * self.hparams.learning_rate

        # Linear decay afterwards
        else:
            for pg in optimizer.param_groups:
                pg['lr'] = polynomial(
                    self.hparams.learning_rate, self.trainer.global_step, max_iter=self.max_steps)

        optimizer.step(opt_closure)
        optimizer.zero_grad()

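    # Worked example of the schedule above (illustrative, not in the original
    # file), with the default learning_rate=1e-4 and max_steps=50000: at
    # global_step 0 the lr is 1e-4 / 1000 = 1e-7, at step 999 it reaches the
    # full 1e-4, then it decays linearly, hitting 5e-5 at step 25000 and 0 at
    # step 50000.
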
    def validation_epoch_end(self, outputs):
        val_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
        val_acc = torch.stack([x['val_acc'] for x in outputs]).mean()

        self.log('val_loss_epoch_end', val_loss, on_epoch=True, sync_dist=True)
        self.log('val_acc_epoch_end', val_acc, on_epoch=True, sync_dist=True)
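

# Illustrative usage sketch, not part of the original commit. It assumes
# dataloaders yielding the batch dict consumed by `forward` above
# ({'boxes', 'img', 'question', 'tokenized_words', 'answer'}) and the
# hypothetical `example_config` defined near the top of this file.
# model = LaTrForVQA(example_config, learning_rate=1e-4, max_steps=50000)
# trainer = pl.Trainer(max_steps=model.max_steps, gpus=1)
# trainer.fit(model, train_dataloader, val_dataloader)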