import base64

import streamlit as st
import torch
import torch.nn.functional as F
from accelerate import Accelerator
from PIL import Image
from transformers import GPT2TokenizerFast, ViTFeatureExtractor

from config import get_config
from model import build_transformer
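
# Pipeline overview: a pretrained ViT feature extractor converts the uploaded
# image into pixel values, a transformer decoder built by build_transformer
# (model.py) generates caption tokens, and greedy decoding turns them into text.
# The trained checkpoint is assumed to live under models/ (loaded below via
# accelerator.load_state).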

def process(model, image, tokenizer, device):
    image = get_image(image)
    model.eval()
    with torch.no_grad():
        encoder_input = image.unsqueeze(0).to(device)  # (1, channels, height, width) pixel values
        model_out = greedy_decode(model, encoder_input, None, tokenizer, 196, device)
        model_text = tokenizer.decode(model_out.detach().cpu().numpy())
        return model_text


# Convert an uploaded image into pixel values for the ViT encoder
def get_image(image):
    # Load the feature extractor matching the ViT backbone
    model_id = 'google/vit-base-patch16-224-in21k'
    feature_extractor = ViTFeatureExtractor.from_pretrained(model_id)

    image = Image.open(image)

    if image.mode != 'RGB':
        image = image.convert('RGB')

    enc_input = feature_extractor(image, return_tensors='pt')

    # (1, 3, 224, 224) -> (3, 224, 224): a single squeeze removes the batch dim
    return enc_input['pixel_values'].squeeze(0)


# Get the GPT-2 tokenizer, with special tokens remapped for this model
def get_or_build_tokenizer(config):
    tokenizer = GPT2TokenizerFast.from_pretrained(
        "openai-community/gpt2",
        unk_token='[UNK]',
        bos_token='[SOS]',
        eos_token='[EOS]',
        pad_token='[PAD]',
    )
    return tokenizer


def causal_mask(size):
    mask = torch.triu(torch.ones((1, size, size)), diagonal=1).type(torch.int)
    return mask == 0
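
# For example, causal_mask(3) yields (True = position may be attended to):
# tensor([[[ True, False, False],
#          [ True,  True, False],
#          [ True,  True,  True]]])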


# get model
def get_model(config, vocab_tgt_len):
    model = build_transformer(vocab_tgt_len, config['seq_len'], d_model=config['d_model'])
    return model
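
# Note: config['seq_len'] and config['d_model'] are assumed to be provided by
# get_config() in config.py.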

# greedy decode 
def greedy_decode(model, source, source_mask, tokenizer_tgt, max_len, device):
    sos_idx = tokenizer_tgt.convert_tokens_to_ids('[SOS]')
    eos_idx = tokenizer_tgt.convert_tokens_to_ids('[EOS]')

    # Precompute the encoder output and reuse it for every decoding step
    encoder_output = model.encode(source, None)

    # Initialize the decoder input with the SOS token
    decoder_input = torch.empty(1, 1).fill_(sos_idx).long().to(device)
    while True:
        if decoder_input.size(1) == max_len:
            break

        # Build the causal mask over the tokens generated so far
        decoder_mask = causal_mask(decoder_input.size(1)).long().to(device)

        # Run the decoder on the current prefix
        out = model.decode(encoder_output, source_mask, decoder_input, decoder_mask)

        # Get next-token probabilities (plain softmax; no temperature is applied)
        logits = model.project(out[:, -1])
        probabilities = F.softmax(logits, dim=-1)

        # Greedily select the next word
        next_word = torch.argmax(probabilities, dim=1)
        
        # Append next word
        decoder_input = torch.cat([decoder_input, next_word.unsqueeze(0)], dim=1)

        if next_word.item() == eos_idx:
            break

    return decoder_input.squeeze(0)
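
# Example usage (hypothetical tensors; assumes a ViT-style image encoder):
#   pixel_values = torch.randn(1, 3, 224, 224).to(device)
#   token_ids = greedy_decode(model, pixel_values, None, tokenizer, 196, device)
#   caption = tokenizer.decode(token_ids.detach().cpu().numpy())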

def image_base64(image):
    # Encode the raw bytes of an uploaded file as a base64 string
    base64_bytes = base64.b64encode(image.read())
    base64_string = base64_bytes.decode()
    return base64_string
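
# Example (hypothetical): embed the upload inline in HTML via a data URI:
#   html = f'<img src="data:image/png;base64,{image_base64(uploaded_file)}"/>'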


def main():
    st.title("Image Captioning with Vision Transformer")
    image = st.file_uploader("Choose an image...", type=["jpg", "png", "jpeg"])

    if image is not None:
        st.image(image, use_column_width=True)
        with st.empty():
            st.write("Processing the image... Please wait.")
            accelerator = Accelerator()
            device = accelerator.device
            config = get_config()
            tokenizer = get_or_build_tokenizer(config)
            model = get_model(config, len(tokenizer))
            model = accelerator.prepare(model)
            # Load the trained checkpoint from models/
            accelerator.load_state('models/')

            text_output = process(model, image, tokenizer, device)
        st.write(text_output)

if __name__ == "__main__":
    main()
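
# To launch the app (assuming this file is saved as app.py):
#   streamlit run app.py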