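# BLIP visual question answering (blip_vqa) model configuration:
# base ViT visual encoder, 480x480 inputs, question-style text processing.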
model:
  arch: blip_vqa
  load_finetuned: True

  # when load_finetuned is True, the VQA-finetuned checkpoint below is loaded;
  # otherwise the base pretrained checkpoint is used
  finetuned: "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
  pretrained: "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"

  # vit encoder
  vit_type: "base"
  vit_grad_ckpt: False
  vit_ckpt_layer: 0
  vit_drop_path_rate: 0.1

  image_size: 480

  # text encoder/decoder (MED) configuration
  med_config_path: "configs/models/med_config.json"

preprocess:
  vis_processor:
    train:
      name: "blip_image_train"
      image_size: 480
    eval:
      name: "blip_image_eval"
      image_size: 480
  text_processor:
    train:
      name: "blip_question"
    eval:
      name: "blip_question"
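
# Usage sketch (not part of the original config; assumes a LAVIS-style repo
# where YAML configs are loaded with OmegaConf, and a hypothetical file path):
#
#   from omegaconf import OmegaConf
#
#   cfg = OmegaConf.load("configs/models/blip_vqa.yaml")  # hypothetical path
#   assert cfg.model.arch == "blip_vqa"
#   # image_size is kept in sync between the model and both vis_processors:
#   print(cfg.model.image_size)                    # 480
#   print(cfg.preprocess.vis_processor.eval.name)  # "blip_image_eval"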