---
# InstructBLIP (Vicuna-7B) model and preprocessing configuration.
# NOTE(review): reconstructed from a garbled extraction (filler "|" lines and
# stripped indentation); key/value content preserved verbatim.

model:
  arch: instruct_vicuna7b

  # Checkpoint loading: start from the released pretrained weights,
  # not from a fine-tuned checkpoint.
  load_finetuned: false
  load_pretrained: true

  pretrained: "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/InstructBLIP/instruct_blip_vicuna7b_trimmed.pth"
  finetuned: ""

  # ViT visual encoder settings
  image_size: 224
  drop_path_rate: 0
  use_grad_checkpoint: false
  vit_precision: "fp16"
  freeze_vit: true

  # Q-Former
  num_query_token: 32

  # Path to the local Vicuna-7B LLM weights
  llm_model: "./llm/vicuna-7b"

  # Default generation prompt (empty string = no prompt)
  prompt: ""

preprocess:
  vis_processor:
    train:
      name: "blip2_image_train"
      image_size: 224
    eval:
      name: "blip_image_eval"
      image_size: 224
  text_processor:
    train:
      name: "blip_caption"
    eval:
      name: "blip_caption"