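# Prismatic VLM training configuration: LLaMA-3-8B-Instruct language backbone
# paired with a SigLIP ViT-SO400M vision backbone at 384px input resolution.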
dataset:
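  # Align-stage data: LLaVA's LAION/CC/SBU 558K caption set
  # (annotation JSON first, image root second).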
  align_stage_components:
  - download/llava-laion-cc-sbu-558k/chat.json
  - download/llava-laion-cc-sbu-558k
  dataset_id: llava-v15
  dataset_root_dir: data
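  # Finetune-stage data: the LLaVA-v1.5 665K instruction-tuning mix.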
  finetune_stage_components:
  - download/llava-v1.5-instruct/llava_v1_5_mix665k.json
  - download/llava-v1.5-instruct
  type: llava-v15
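# Path to a local file holding the Hugging Face access token
# (the Llama 3 weights are gated).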
hf_token: .hf_token
model:
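  # align_* fields configure the projector-alignment stage; finetune_* fields
  # configure the full finetune stage this run actually executes (stage: finetune).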
  align_epochs: 1
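  # Global batch 256 = 16 per device x (data-parallel ranks x grad-accum steps);
  # likewise 128 = 4 x 32 for the finetune stage.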
  align_global_batch_size: 256
  align_learning_rate: 0.001
  align_lr_scheduler_type: linear-warmup+cosine-decay
  align_max_grad_norm: 1.0
  align_max_steps: null
  align_per_device_batch_size: 16
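  # Shards gradients and optimizer state only (ZeRO-2-style FSDP).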
  align_train_strategy: fsdp-shard-grad-op
  align_warmup_ratio: 0.03
  align_weight_decay: 0.0
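  # GELU-MLP projector; "no-align" = no separately pre-aligned projector
  # (single-stage recipe, so the align_* settings above go unused here).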
  arch_specifier: no-align+gelu-mlp
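  # Memory/throughput knobs: activation checkpointing and mixed-precision training.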
  enable_gradient_checkpointing: true
  enable_mixed_precision_training: true
  finetune_epochs: 1
  finetune_global_batch_size: 128
  finetune_learning_rate: 2.0e-05
  finetune_lr_scheduler_type: linear-warmup+cosine-decay
  finetune_max_grad_norm: 1.0
  finetune_max_steps: null
  finetune_per_device_batch_size: 4
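  # Fully shards params, grads, and optimizer state (ZeRO-3-style FSDP)
  # for the memory-heavier finetune stage.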
  finetune_train_strategy: fsdp-full-shard
  finetune_warmup_ratio: 0.03
  finetune_weight_decay: 0.1
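  # letterbox: pad images to square (preserving aspect ratio) before
  # resizing to the 384px backbone resolution.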
  image_resize_strategy: letterbox
  llm_backbone_id: llama3-8b-instruct
  llm_max_length: 2048
  model_id: llama3-instruct+8b+siglip
  reduce_in_full_precision: false
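  # Registry key for the model config class; the "7b" tag names the recipe,
  # not this 8B model's parameter count.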
  type: one-stage+7b
  vision_backbone_id: siglip-vit-so400m-384px
pretrained_checkpoint: null
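# Checkpoints and logs land under <run_root_dir>/<run_id>/.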
run_id: llama3-instruct+8b+siglip+stage-finetune+x7
run_root_dir: runs
seed: 7
stage: finetune
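# Metrics stream to local JSONL files and to Weights & Biases
# (entity/project given below).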
trackers:
- jsonl
- wandb
wandb_entity: rylan
wandb_project: prismatic-vlm