model:
  arch: pnp_vqa
  model_type: large

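  # Image-question matching model (BLIP image-text matching): localizes image regions relevant to the question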
  image_question_matching_model:
    arch: blip_image_text_matching
    load_finetuned: True

    finetuned: "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_large_retrieval_coco_train2014.pth"

    vit_type: "large"
    vit_grad_ckpt: False
    vit_ckpt_layer: 0

    image_size: 384

    med_config_path: "configs/models/med_large_config.json"

    embed_dim: 256

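  # Image captioning model (BLIP captioner): generates captions used as textual evidence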
  image_captioning_model:
    arch: blip_caption
    load_finetuned: True

    finetuned: "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_large_caption_coco_train2014.pth"

    vit_type: "large"
    vit_grad_ckpt: True
    vit_ckpt_layer: 5

    image_size: 384

    med_config_path: "configs/models/med_large_config.json"

    prompt: "a picture of "

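  # Question answering model (UnifiedQAv2 with Fusion-in-Decoder): answers the question from the generated captions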
  question_answering_model:
    arch: pnp_unifiedqav2_fid

    pretrained: "allenai/unifiedqa-v2-t5-large-1363200"

    t5_config_path: "configs/models/pnp-vqa/unifiedqav2_large_config.json"

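# Evaluation-time preprocessing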
preprocess:
  vis_processor:
    eval:
      name: "blip_image_eval"
      image_size: 384
  text_processor:
    eval:
      name: "blip_caption"