# MiniGPT4-video/train_configs/224_minigpt4_llama2_image.yaml
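
# Stage-1 image-text pretraining: aligns the vision encoder output with the
# Llama-2-7b-chat backbone on image captioning data at 224x224 resolution
# (the "stage1" naming follows the output_dir below).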
model:
  arch: mini_gpt4_llama_v2
  model_type: pretrain_vicuna
  llama_model: "meta-llama/Llama-2-7b-chat-hf"
  max_txt_len: 160
  max_context_len: 512
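
# The two caption datasets are mixed by sample_ratio (~115:14, i.e. LAION is
# drawn roughly 8x more often than CC-SBU). Both share the same 224x224
# BLIP-2 image processor and BLIP caption text processor.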
datasets:
  laion:
    batch_size: 64
    vis_processor:
      train:
        name: "blip2_image_train"
        image_size: 224
    text_processor:
      train:
        name: "blip_caption"
    sample_ratio: 115
  cc_sbu:
    batch_size: 64
    vis_processor:
      train:
        name: "blip2_image_train"
        image_size: 224
    text_processor:
      train:
        name: "blip_caption"
    sample_ratio: 14

run:
  task: image_text_pretrain
  # optimizer
  lr_sched: "linear_warmup_cosine_lr"
  init_lr: 1e-4
  min_lr: 8e-5
  warmup_lr: 1e-6
  weight_decay: 0.05
  max_epoch: 4
  num_workers: 4
  warmup_steps: 5000
  iters_per_epoch: 5000
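  # Schedule arithmetic: max_epoch x iters_per_epoch = 4 x 5000 = 20000
  # optimizer steps in total. The linear warmup ramps the LR from warmup_lr
  # (1e-6) to init_lr (1e-4) over the first 5000 steps, i.e. the entire
  # first epoch, after which cosine decay anneals it toward min_lr (8e-5).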
  seed: 42
  output_dir: "output/minigpt4_stage1_pretrain_llama2"
  amp: True
  resume_ckpt_path: null
  evaluate: False
  train_splits: ["train"]
  device: "cuda"
  world_size: 1
  dist_url: "env://"
  distributed: True
  wandb_log: True
  job_name: minigpt4_llama2_pretrain
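
# A typical single-node launch, assuming this repo follows MiniGPT-4's
# train.py entry point and its --cfg-path flag (check the repo README):
#   torchrun --nproc_per_node 1 train.py \
#     --cfg-path train_configs/224_minigpt4_llama2_image.yaml
# With distributed: True and dist_url: "env://", the process group is
# initialized from the environment variables that torchrun sets.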