marcusinthesky committed
Commit 49b1e28
1 Parent(s): 511722f

Upload 2 files

- configuration_vtde.py +15 -0
- modelling_vtde.py +160 -0
configuration_vtde.py
ADDED
@@ -0,0 +1,15 @@
from transformers import VisionTextDualEncoderConfig


class VTDEConfig(VisionTextDualEncoderConfig):
    def __init__(self, projection_dim=512, logit_scale_init_value=2.6592,
                 text_pooling_mode='mean',
                 vision_pooling_mode='max',
                 **kwargs):
        """
        pooling_mode in ['mean', 'max', 'cls', 'norm']
        https://arxiv.org/pdf/2210.09996.pdf
        https://github.com/kahnchana/clippy/blob/3c102c29c32f7c66c6e52e09b795fe9c061bbb03/src/open_clip/hf_model.py#L56
        """
        self.text_pooling_mode = text_pooling_mode
        self.vision_pooling_mode = vision_pooling_mode
        super().__init__(projection_dim, logit_scale_init_value, **kwargs)
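
For context (not part of this commit): a minimal sketch of constructing the config. It assumes the parent VisionTextDualEncoderConfig's usual contract that text_config and vision_config are passed as dicts; the BERT/ViT pairing below is an illustrative choice, not something this repo prescribes.

from transformers import BertConfig, ViTConfig

# Illustrative encoder pair; any text/vision config combination should work.
config = VTDEConfig(
    text_config=BertConfig().to_dict(),
    vision_config=ViTConfig().to_dict(),
    text_pooling_mode="mean",
    vision_pooling_mode="max",
)
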
modelling_vtde.py
ADDED
@@ -0,0 +1,160 @@
# AUTOGENERATED! DO NOT EDIT! File to edit: ../notebooks/12_modelling.ipynb.

# %% auto 0
__all__ = ['VTDEConfig', 'VTDE']

# %% ../notebooks/12_modelling.ipynb 1
from transformers.models.clip.modeling_clip import CLIPOutput, clip_loss
from typing import Optional, Tuple, Union
from transformers import PreTrainedModel, VisionTextDualEncoderModel
from .configuration_vtde import VTDEConfig
import torch


class VTDE(VisionTextDualEncoderModel):
    config_class = VTDEConfig

    def __init__(
        self,
        config: Optional[VTDEConfig] = None,
        vision_model: Optional[PreTrainedModel] = None,
        text_model: Optional[PreTrainedModel] = None,
    ):
        super().__init__(config, vision_model, text_model)
        self.text_pooling_mode = config.text_pooling_mode
        self.vision_pooling_mode = config.vision_pooling_mode

    def get_text_features(
        self,
        input_ids=None,
        attention_mask=None,
        position_ids=None,
        token_type_ids=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        text_outputs = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        if self.text_pooling_mode == 'cls':
            # pooler output (first token passed through the pooler head)
            pooled_output = text_outputs[1]
        elif self.text_pooling_mode == 'mean':
            pooled_output = torch.mean(text_outputs[0], dim=1)
        elif self.text_pooling_mode == 'max':
            pooled_output = torch.max(text_outputs[0], dim=1)[0]
        elif self.text_pooling_mode == 'norm':
            # select, for each sample, the token whose hidden state has the largest norm
            last_hidden_states = text_outputs[0]
            token_norms = torch.norm(last_hidden_states[:, :-1, :], dim=-1)
            max_norm_idx = torch.argmax(token_norms, dim=1)
            batch_idx = torch.arange(last_hidden_states.size(0), device=last_hidden_states.device)
            pooled_output = last_hidden_states[batch_idx, max_norm_idx, :]
        else:
            raise NotImplementedError(f"unknown text_pooling_mode: {self.text_pooling_mode}")

        text_features = self.text_projection(pooled_output)

        return text_features

    def get_image_features(
        self,
        pixel_values=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        vision_outputs = self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        if self.vision_pooling_mode == 'cls':
            pooled_output = vision_outputs[1]
        elif self.vision_pooling_mode == 'mean':
            pooled_output = torch.mean(vision_outputs[0], dim=1)
        elif self.vision_pooling_mode == 'max':
            pooled_output = torch.max(vision_outputs[0], dim=1)[0]
        elif self.vision_pooling_mode == 'norm':
            # select, for each sample, the patch whose hidden state has the largest norm
            last_hidden_states = vision_outputs[0]
            patch_norms = torch.norm(last_hidden_states[:, :-1, :], dim=-1)
            max_norm_idx = torch.argmax(patch_norms, dim=1)
            batch_idx = torch.arange(last_hidden_states.size(0), device=last_hidden_states.device)
            pooled_output = last_hidden_states[batch_idx, max_norm_idx, :]
        else:
            raise NotImplementedError(f"unknown vision_pooling_mode: {self.vision_pooling_mode}")

        image_features = self.visual_projection(pooled_output)

        return image_features

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        return_loss: Optional[bool] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], CLIPOutput]:

        return_dict = return_dict if return_dict is not None else self.config.return_dict

        image_embeds = self.get_image_features(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        text_embeds = self.get_text_features(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            token_type_ids=token_type_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # normalized features
        image_embeds = image_embeds / image_embeds.norm(dim=-1, keepdim=True)
        text_embeds = text_embeds / text_embeds.norm(dim=-1, keepdim=True)

        # cosine similarity as logits
        logit_scale = self.logit_scale.exp()
        logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale
        logits_per_image = logits_per_text.T

        loss = None
        if return_loss:
            loss = clip_loss(logits_per_text)

        if not return_dict:
            # only the projected embeddings are available here; the raw encoder
            # outputs are consumed inside get_text_features/get_image_features
            output = (logits_per_image, logits_per_text, text_embeds, image_embeds)
            return ((loss,) + output) if loss is not None else output

        return CLIPOutput(
            loss=loss,
            logits_per_image=logits_per_image,
            logits_per_text=logits_per_text,
            text_embeds=text_embeds,
            image_embeds=image_embeds,
            text_model_output=text_embeds,
            vision_model_output=image_embeds,
        )

VTDEConfig.register_for_auto_class()
VTDE.register_for_auto_class("AutoModel")
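
For context (not part of this commit): a hedged end-to-end sketch of wiring the class up and running one contrastive step. The ViT/BERT checkpoints and the toy batch are illustrative assumptions only.

import torch
from transformers import AutoModel

# Illustrative checkpoints; any vision/text encoder pair should do.
vision_model = AutoModel.from_pretrained("google/vit-base-patch16-224")
text_model = AutoModel.from_pretrained("bert-base-uncased")

config = VTDEConfig(
    vision_config=vision_model.config.to_dict(),
    text_config=text_model.config.to_dict(),
    text_pooling_mode="mean",
    vision_pooling_mode="max",
)
model = VTDE(config=config, vision_model=vision_model, text_model=text_model)

# Toy batch of 2 caption/image pairs.
batch = dict(
    input_ids=torch.randint(0, text_model.config.vocab_size, (2, 16)),
    attention_mask=torch.ones(2, 16, dtype=torch.long),
    pixel_values=torch.randn(2, 3, 224, 224),
)
outputs = model(**batch, return_loss=True, return_dict=True)
print(outputs.loss, outputs.logits_per_image.shape)  # scalar loss, (2, 2) logits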