model:
  arch: mini_gpt4

  # vit encoder
  image_size: 224
  drop_path_rate: 0
  use_grad_checkpoint: False
  vit_precision: "fp16"
  freeze_vit: True
  freeze_qformer: True

  # Q-Former
  num_query_token: 32

  # LLM checkpoint (Vicuna / Llama 2 paths kept commented out; Llama 3 is active)
  # llama_model: "/home/ubuntu/ckpt/hf/Llama-2-7b-chat-hf"
  # llama_model: "/home/ubuntu/ckpt/hf/vicuna-7b-v1.5"  # "/home/ubuntu/ckpt/hf/vicuna-13b-delta-v0"
  llama_model: "/home/ubuntu/ckpt/Meta-Llama-3-8B-Instruct-hf"

  # generation configs
  prompt: ""

preprocess:
  vis_processor:
    train:
      name: "blip2_image_train"
      image_size: 224
    eval:
      name: "blip2_image_eval"
      image_size: 224
  text_processor:
    train:
      name: "blip_caption"
    eval:
      name: "blip_caption"
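
# --- Usage sketch (not part of the original config; kept as comments so the YAML stays valid) ---
# In MiniGPT-4-style codebases a model config like this is typically loaded with OmegaConf and
# handed to a model builder keyed on `arch`. The filename below and the registry/from_config
# entry points are assumptions about the surrounding repo, not facts stated in this file.
#
#   from omegaconf import OmegaConf
#   cfg = OmegaConf.load("mini_gpt4_llama3.yaml")        # hypothetical filename for this config
#   model_cfg = cfg.model                                # arch, ViT/Q-Former flags, llama_model path
#   train_vis_cfg = cfg.preprocess.vis_processor.train   # "blip2_image_train", image_size 224
#   # model = registry.get_model_class(model_cfg.arch).from_config(model_cfg)  # assumed builder call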