model:
  arch: mini_gpt4_llama_v2
  model_type: pretrain_vicuna
  llama_model: "mistralai/Mistral-7B-Instruct-v0.2"
  max_txt_len: 160
  max_context_len: 512
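
# Note (assumption): the backbone appears to be selected by llama_model (a
# Hugging Face model id), so model_type: pretrain_vicuna looks carried over
# from the original Vicuna config rather than describing the LLM loaded here.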

datasets:
  laion:
    batch_size: 64
    vis_processor:
      train:
        name: "blip2_image_train"
        image_size: 224
    text_processor:
      train:
        name: "blip_caption"
    sample_ratio: 115

  cc_sbu:
    batch_size: 64
    vis_processor:
      train:
        name: "blip2_image_train"
        image_size: 224
    text_processor:
      train:
        name: "blip_caption"
    sample_ratio: 14
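
# Sampling sketch (assuming sample_ratio values act as relative weights, as
# the name suggests): each training batch is drawn from laion with
# probability 115 / (115 + 14) ≈ 89% and from cc_sbu the remaining ≈ 11%.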

run:
  task: image_text_pretrain

  # optimizer
  lr_sched: "linear_warmup_cosine_lr"
  init_lr: 1e-4
  min_lr: 8e-5
  warmup_lr: 1e-6

  weight_decay: 0.05
  max_epoch: 4
  num_workers: 4
  warmup_steps: 5000
  iters_per_epoch: 5000
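
  # Schedule sketch (an assumption, reading linear_warmup_cosine_lr by its
  # name): the lr ramps linearly from warmup_lr (1e-6) to init_lr (1e-4) over
  # the first warmup_steps (5000) iterations, i.e. exactly the first epoch
  # since iters_per_epoch is also 5000, then follows a cosine decay from
  # init_lr toward min_lr (8e-5) across the max_epoch * iters_per_epoch
  # = 4 * 5000 = 20000 total iterations.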

  seed: 42
  output_dir: "output/minigpt4_stage1_pretrain_mistral"

  amp: True
  resume_ckpt_path: null

  evaluate: False
  train_splits: ["train"]

  device: "cuda"
  world_size: 1
  dist_url: "env://"
  distributed: True

  wandb_log: True
  job_name: minigpt4_mistral_pretrain
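
# Launcher note: with dist_url: "env://", torch.distributed reads the master
# address, rank, and world size from environment variables, so world_size: 1
# here is typically overridden when the job is started with a multi-process
# launcher such as torchrun.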