OTTER-MPT7B-Init / pytorch_model.bin.index.json
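For context, this is the standard Hugging Face sharded-checkpoint index for OTTER-MPT7B-Init: "metadata.total_size" records the combined byte size of all parameters (about 32.5 GB here), and "weight_map" assigns each tensor name to one of four .bin shards. A minimal sketch of how such an index can be consumed, loading one shard at a time to keep peak memory low; the local file paths and the shard-grouping helper below are illustrative assumptions, not part of any particular library:

import json
import torch

# Read the index; assumes the file sits in the current directory.
with open("pytorch_model.bin.index.json") as f:
    index = json.load(f)

print(index["metadata"]["total_size"])  # 32525207624 bytes, ~32.5 GB

# Invert the map: shard file -> list of tensor names stored in it.
shards = {}
for name, shard_file in index["weight_map"].items():
    shards.setdefault(shard_file, []).append(name)

# Load one shard at a time so only a single .bin is resident in memory.
state_dict = {}
for shard_file, names in shards.items():
    shard = torch.load(shard_file, map_location="cpu")
    state_dict.update({n: shard[n] for n in names})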
{
"metadata": {
"total_size": 32525207624
},
"weight_map": {
"lang_encoder.transformer.blocks.0.decoder_layer.attn.Wqkv.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.0.decoder_layer.attn.out_proj.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.0.decoder_layer.ffn.down_proj.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.0.decoder_layer.ffn.up_proj.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.0.decoder_layer.norm_1.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.0.decoder_layer.norm_2.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.1.decoder_layer.attn.Wqkv.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.1.decoder_layer.attn.out_proj.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.1.decoder_layer.ffn.down_proj.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.1.decoder_layer.ffn.up_proj.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.1.decoder_layer.norm_1.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.1.decoder_layer.norm_2.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.10.decoder_layer.attn.Wqkv.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.10.decoder_layer.attn.out_proj.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.10.decoder_layer.ffn.down_proj.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.10.decoder_layer.ffn.up_proj.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.10.decoder_layer.norm_1.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.10.decoder_layer.norm_2.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.11.decoder_layer.attn.Wqkv.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.11.decoder_layer.attn.out_proj.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.11.decoder_layer.ffn.down_proj.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.11.decoder_layer.ffn.up_proj.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.11.decoder_layer.norm_1.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.11.decoder_layer.norm_2.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.11.gated_cross_attn_layer.attn.norm.bias": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.11.gated_cross_attn_layer.attn.norm.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.11.gated_cross_attn_layer.attn.to_kv.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.11.gated_cross_attn_layer.attn.to_out.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.11.gated_cross_attn_layer.attn.to_q.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.11.gated_cross_attn_layer.attn_gate": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.11.gated_cross_attn_layer.feed_forward.0.bias": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.11.gated_cross_attn_layer.feed_forward.0.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.11.gated_cross_attn_layer.feed_forward.1.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.11.gated_cross_attn_layer.feed_forward.3.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.11.gated_cross_attn_layer.ff_gate": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.12.decoder_layer.attn.Wqkv.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.12.decoder_layer.attn.out_proj.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.12.decoder_layer.ffn.down_proj.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.12.decoder_layer.ffn.up_proj.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.12.decoder_layer.norm_1.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.12.decoder_layer.norm_2.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.13.decoder_layer.attn.Wqkv.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.13.decoder_layer.attn.out_proj.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.13.decoder_layer.ffn.down_proj.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.13.decoder_layer.ffn.up_proj.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.13.decoder_layer.norm_1.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.13.decoder_layer.norm_2.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.14.decoder_layer.attn.Wqkv.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.14.decoder_layer.attn.out_proj.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.14.decoder_layer.ffn.down_proj.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.14.decoder_layer.ffn.up_proj.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.14.decoder_layer.norm_1.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.14.decoder_layer.norm_2.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.15.decoder_layer.attn.Wqkv.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.15.decoder_layer.attn.out_proj.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.15.decoder_layer.ffn.down_proj.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.15.decoder_layer.ffn.up_proj.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.15.decoder_layer.norm_1.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.15.decoder_layer.norm_2.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.15.gated_cross_attn_layer.attn.norm.bias": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.15.gated_cross_attn_layer.attn.norm.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.15.gated_cross_attn_layer.attn.to_kv.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.15.gated_cross_attn_layer.attn.to_out.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.15.gated_cross_attn_layer.attn.to_q.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.15.gated_cross_attn_layer.attn_gate": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.15.gated_cross_attn_layer.feed_forward.0.bias": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.15.gated_cross_attn_layer.feed_forward.0.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.15.gated_cross_attn_layer.feed_forward.1.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.15.gated_cross_attn_layer.feed_forward.3.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.15.gated_cross_attn_layer.ff_gate": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.16.decoder_layer.attn.Wqkv.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.16.decoder_layer.attn.out_proj.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.16.decoder_layer.ffn.down_proj.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.16.decoder_layer.ffn.up_proj.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.16.decoder_layer.norm_1.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.16.decoder_layer.norm_2.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.17.decoder_layer.attn.Wqkv.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.17.decoder_layer.attn.out_proj.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.17.decoder_layer.ffn.down_proj.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.17.decoder_layer.ffn.up_proj.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.17.decoder_layer.norm_1.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.17.decoder_layer.norm_2.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.18.decoder_layer.attn.Wqkv.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.18.decoder_layer.attn.out_proj.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.18.decoder_layer.ffn.down_proj.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.18.decoder_layer.ffn.up_proj.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.18.decoder_layer.norm_1.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.18.decoder_layer.norm_2.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.19.decoder_layer.attn.Wqkv.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.19.decoder_layer.attn.out_proj.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.19.decoder_layer.ffn.down_proj.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.19.decoder_layer.ffn.up_proj.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.19.decoder_layer.norm_1.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.19.decoder_layer.norm_2.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.19.gated_cross_attn_layer.attn.norm.bias": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.19.gated_cross_attn_layer.attn.norm.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.19.gated_cross_attn_layer.attn.to_kv.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.19.gated_cross_attn_layer.attn.to_out.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.19.gated_cross_attn_layer.attn.to_q.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.19.gated_cross_attn_layer.attn_gate": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.19.gated_cross_attn_layer.feed_forward.0.bias": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.19.gated_cross_attn_layer.feed_forward.0.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.19.gated_cross_attn_layer.feed_forward.1.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.19.gated_cross_attn_layer.feed_forward.3.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.19.gated_cross_attn_layer.ff_gate": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.2.decoder_layer.attn.Wqkv.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.2.decoder_layer.attn.out_proj.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.2.decoder_layer.ffn.down_proj.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.2.decoder_layer.ffn.up_proj.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.2.decoder_layer.norm_1.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.2.decoder_layer.norm_2.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.20.decoder_layer.attn.Wqkv.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.20.decoder_layer.attn.out_proj.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.20.decoder_layer.ffn.down_proj.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.20.decoder_layer.ffn.up_proj.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.20.decoder_layer.norm_1.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.20.decoder_layer.norm_2.weight": "pytorch_model-00002-of-00004.bin",
"lang_encoder.transformer.blocks.21.decoder_layer.attn.Wqkv.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.21.decoder_layer.attn.out_proj.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.21.decoder_layer.ffn.down_proj.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.21.decoder_layer.ffn.up_proj.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.21.decoder_layer.norm_1.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.21.decoder_layer.norm_2.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.22.decoder_layer.attn.Wqkv.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.22.decoder_layer.attn.out_proj.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.22.decoder_layer.ffn.down_proj.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.22.decoder_layer.ffn.up_proj.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.22.decoder_layer.norm_1.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.22.decoder_layer.norm_2.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.23.decoder_layer.attn.Wqkv.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.23.decoder_layer.attn.out_proj.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.23.decoder_layer.ffn.down_proj.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.23.decoder_layer.ffn.up_proj.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.23.decoder_layer.norm_1.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.23.decoder_layer.norm_2.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.23.gated_cross_attn_layer.attn.norm.bias": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.23.gated_cross_attn_layer.attn.norm.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.23.gated_cross_attn_layer.attn.to_kv.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.23.gated_cross_attn_layer.attn.to_out.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.23.gated_cross_attn_layer.attn.to_q.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.23.gated_cross_attn_layer.attn_gate": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.23.gated_cross_attn_layer.feed_forward.0.bias": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.23.gated_cross_attn_layer.feed_forward.0.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.23.gated_cross_attn_layer.feed_forward.1.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.23.gated_cross_attn_layer.feed_forward.3.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.23.gated_cross_attn_layer.ff_gate": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.24.decoder_layer.attn.Wqkv.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.24.decoder_layer.attn.out_proj.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.24.decoder_layer.ffn.down_proj.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.24.decoder_layer.ffn.up_proj.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.24.decoder_layer.norm_1.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.24.decoder_layer.norm_2.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.25.decoder_layer.attn.Wqkv.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.25.decoder_layer.attn.out_proj.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.25.decoder_layer.ffn.down_proj.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.25.decoder_layer.ffn.up_proj.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.25.decoder_layer.norm_1.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.25.decoder_layer.norm_2.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.26.decoder_layer.attn.Wqkv.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.26.decoder_layer.attn.out_proj.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.26.decoder_layer.ffn.down_proj.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.26.decoder_layer.ffn.up_proj.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.26.decoder_layer.norm_1.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.26.decoder_layer.norm_2.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.27.decoder_layer.attn.Wqkv.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.27.decoder_layer.attn.out_proj.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.27.decoder_layer.ffn.down_proj.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.27.decoder_layer.ffn.up_proj.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.27.decoder_layer.norm_1.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.27.decoder_layer.norm_2.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.27.gated_cross_attn_layer.attn.norm.bias": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.27.gated_cross_attn_layer.attn.norm.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.27.gated_cross_attn_layer.attn.to_kv.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.27.gated_cross_attn_layer.attn.to_out.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.27.gated_cross_attn_layer.attn.to_q.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.27.gated_cross_attn_layer.attn_gate": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.27.gated_cross_attn_layer.feed_forward.0.bias": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.27.gated_cross_attn_layer.feed_forward.0.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.27.gated_cross_attn_layer.feed_forward.1.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.27.gated_cross_attn_layer.feed_forward.3.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.27.gated_cross_attn_layer.ff_gate": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.28.decoder_layer.attn.Wqkv.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.28.decoder_layer.attn.out_proj.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.28.decoder_layer.ffn.down_proj.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.28.decoder_layer.ffn.up_proj.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.28.decoder_layer.norm_1.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.28.decoder_layer.norm_2.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.29.decoder_layer.attn.Wqkv.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.29.decoder_layer.attn.out_proj.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.29.decoder_layer.ffn.down_proj.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.29.decoder_layer.ffn.up_proj.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.29.decoder_layer.norm_1.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.29.decoder_layer.norm_2.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.3.decoder_layer.attn.Wqkv.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.3.decoder_layer.attn.out_proj.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.3.decoder_layer.ffn.down_proj.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.3.decoder_layer.ffn.up_proj.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.3.decoder_layer.norm_1.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.3.decoder_layer.norm_2.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.3.gated_cross_attn_layer.attn.norm.bias": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.3.gated_cross_attn_layer.attn.norm.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.3.gated_cross_attn_layer.attn.to_kv.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.3.gated_cross_attn_layer.attn.to_out.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.3.gated_cross_attn_layer.attn.to_q.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.3.gated_cross_attn_layer.attn_gate": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.3.gated_cross_attn_layer.feed_forward.0.bias": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.3.gated_cross_attn_layer.feed_forward.0.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.3.gated_cross_attn_layer.feed_forward.1.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.3.gated_cross_attn_layer.feed_forward.3.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.3.gated_cross_attn_layer.ff_gate": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.30.decoder_layer.attn.Wqkv.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.30.decoder_layer.attn.out_proj.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.30.decoder_layer.ffn.down_proj.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.30.decoder_layer.ffn.up_proj.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.30.decoder_layer.norm_1.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.30.decoder_layer.norm_2.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.31.decoder_layer.attn.Wqkv.weight": "pytorch_model-00004-of-00004.bin",
"lang_encoder.transformer.blocks.31.decoder_layer.attn.out_proj.weight": "pytorch_model-00004-of-00004.bin",
"lang_encoder.transformer.blocks.31.decoder_layer.ffn.down_proj.weight": "pytorch_model-00004-of-00004.bin",
"lang_encoder.transformer.blocks.31.decoder_layer.ffn.up_proj.weight": "pytorch_model-00004-of-00004.bin",
"lang_encoder.transformer.blocks.31.decoder_layer.norm_1.weight": "pytorch_model-00004-of-00004.bin",
"lang_encoder.transformer.blocks.31.decoder_layer.norm_2.weight": "pytorch_model-00004-of-00004.bin",
"lang_encoder.transformer.blocks.31.gated_cross_attn_layer.attn.norm.bias": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.31.gated_cross_attn_layer.attn.norm.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.31.gated_cross_attn_layer.attn.to_kv.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.31.gated_cross_attn_layer.attn.to_out.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.31.gated_cross_attn_layer.attn.to_q.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.31.gated_cross_attn_layer.attn_gate": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.31.gated_cross_attn_layer.feed_forward.0.bias": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.31.gated_cross_attn_layer.feed_forward.0.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.31.gated_cross_attn_layer.feed_forward.1.weight": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.31.gated_cross_attn_layer.feed_forward.3.weight": "pytorch_model-00004-of-00004.bin",
"lang_encoder.transformer.blocks.31.gated_cross_attn_layer.ff_gate": "pytorch_model-00003-of-00004.bin",
"lang_encoder.transformer.blocks.4.decoder_layer.attn.Wqkv.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.4.decoder_layer.attn.out_proj.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.4.decoder_layer.ffn.down_proj.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.4.decoder_layer.ffn.up_proj.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.4.decoder_layer.norm_1.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.4.decoder_layer.norm_2.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.5.decoder_layer.attn.Wqkv.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.5.decoder_layer.attn.out_proj.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.5.decoder_layer.ffn.down_proj.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.5.decoder_layer.ffn.up_proj.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.5.decoder_layer.norm_1.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.5.decoder_layer.norm_2.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.6.decoder_layer.attn.Wqkv.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.6.decoder_layer.attn.out_proj.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.6.decoder_layer.ffn.down_proj.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.6.decoder_layer.ffn.up_proj.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.6.decoder_layer.norm_1.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.6.decoder_layer.norm_2.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.7.decoder_layer.attn.Wqkv.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.7.decoder_layer.attn.out_proj.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.7.decoder_layer.ffn.down_proj.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.7.decoder_layer.ffn.up_proj.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.7.decoder_layer.norm_1.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.7.decoder_layer.norm_2.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.7.gated_cross_attn_layer.attn.norm.bias": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.7.gated_cross_attn_layer.attn.norm.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.7.gated_cross_attn_layer.attn.to_kv.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.7.gated_cross_attn_layer.attn.to_out.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.7.gated_cross_attn_layer.attn.to_q.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.7.gated_cross_attn_layer.attn_gate": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.7.gated_cross_attn_layer.feed_forward.0.bias": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.7.gated_cross_attn_layer.feed_forward.0.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.7.gated_cross_attn_layer.feed_forward.1.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.7.gated_cross_attn_layer.feed_forward.3.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.7.gated_cross_attn_layer.ff_gate": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.8.decoder_layer.attn.Wqkv.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.8.decoder_layer.attn.out_proj.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.8.decoder_layer.ffn.down_proj.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.8.decoder_layer.ffn.up_proj.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.8.decoder_layer.norm_1.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.8.decoder_layer.norm_2.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.9.decoder_layer.attn.Wqkv.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.9.decoder_layer.attn.out_proj.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.9.decoder_layer.ffn.down_proj.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.9.decoder_layer.ffn.up_proj.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.9.decoder_layer.norm_1.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.blocks.9.decoder_layer.norm_2.weight": "pytorch_model-00001-of-00004.bin",
"lang_encoder.transformer.norm_f.weight": "pytorch_model-00004-of-00004.bin",
"lang_encoder.transformer.wte.weight": "pytorch_model-00001-of-00004.bin",
"perceiver.latents": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.0.feed_forward.0.bias": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.0.feed_forward.0.weight": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.0.feed_forward.1.weight": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.0.feed_forward.3.weight": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.0.norm_latents.bias": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.0.norm_latents.weight": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.0.norm_media.bias": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.0.norm_media.weight": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.0.to_kv.weight": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.0.to_out.weight": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.0.to_q.weight": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.1.feed_forward.0.bias": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.1.feed_forward.0.weight": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.1.feed_forward.1.weight": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.1.feed_forward.3.weight": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.1.norm_latents.bias": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.1.norm_latents.weight": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.1.norm_media.bias": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.1.norm_media.weight": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.1.to_kv.weight": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.1.to_out.weight": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.1.to_q.weight": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.2.feed_forward.0.bias": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.2.feed_forward.0.weight": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.2.feed_forward.1.weight": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.2.feed_forward.3.weight": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.2.norm_latents.bias": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.2.norm_latents.weight": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.2.norm_media.bias": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.2.norm_media.weight": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.2.to_kv.weight": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.2.to_out.weight": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.2.to_q.weight": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.3.feed_forward.0.bias": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.3.feed_forward.0.weight": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.3.feed_forward.1.weight": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.3.feed_forward.3.weight": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.3.norm_latents.bias": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.3.norm_latents.weight": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.3.norm_media.bias": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.3.norm_media.weight": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.3.to_kv.weight": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.3.to_out.weight": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.3.to_q.weight": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.4.feed_forward.0.bias": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.4.feed_forward.0.weight": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.4.feed_forward.1.weight": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.4.feed_forward.3.weight": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.4.norm_latents.bias": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.4.norm_latents.weight": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.4.norm_media.bias": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.4.norm_media.weight": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.4.to_kv.weight": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.4.to_out.weight": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.4.to_q.weight": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.5.feed_forward.0.bias": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.5.feed_forward.0.weight": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.5.feed_forward.1.weight": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.5.feed_forward.3.weight": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.5.norm_latents.bias": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.5.norm_latents.weight": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.5.norm_media.bias": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.5.norm_media.weight": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.5.to_kv.weight": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.5.to_out.weight": "pytorch_model-00004-of-00004.bin",
"perceiver.layers.5.to_q.weight": "pytorch_model-00004-of-00004.bin",
"perceiver.norm.bias": "pytorch_model-00004-of-00004.bin",
"perceiver.norm.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.embeddings.class_embedding": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.embeddings.patch_embedding.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.embeddings.position_embedding.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.embeddings.position_ids": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.0.layer_norm1.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.0.layer_norm1.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.0.layer_norm2.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.0.layer_norm2.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.0.mlp.fc1.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.0.mlp.fc1.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.0.mlp.fc2.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.0.mlp.fc2.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.0.self_attn.k_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.0.self_attn.k_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.0.self_attn.out_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.0.self_attn.out_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.0.self_attn.q_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.0.self_attn.q_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.0.self_attn.v_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.0.self_attn.v_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.1.layer_norm1.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.1.layer_norm1.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.1.layer_norm2.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.1.layer_norm2.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.1.mlp.fc1.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.1.mlp.fc1.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.1.mlp.fc2.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.1.mlp.fc2.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.1.self_attn.k_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.1.self_attn.k_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.1.self_attn.out_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.1.self_attn.out_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.1.self_attn.q_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.1.self_attn.q_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.1.self_attn.v_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.1.self_attn.v_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.10.layer_norm1.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.10.layer_norm1.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.10.layer_norm2.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.10.layer_norm2.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.10.mlp.fc1.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.10.mlp.fc1.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.10.mlp.fc2.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.10.mlp.fc2.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.10.self_attn.k_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.10.self_attn.k_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.10.self_attn.out_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.10.self_attn.out_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.10.self_attn.q_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.10.self_attn.q_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.10.self_attn.v_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.10.self_attn.v_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.11.layer_norm1.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.11.layer_norm1.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.11.layer_norm2.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.11.layer_norm2.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.11.mlp.fc1.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.11.mlp.fc1.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.11.mlp.fc2.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.11.mlp.fc2.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.11.self_attn.k_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.11.self_attn.k_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.11.self_attn.out_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.11.self_attn.out_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.11.self_attn.q_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.11.self_attn.q_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.11.self_attn.v_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.11.self_attn.v_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.12.layer_norm1.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.12.layer_norm1.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.12.layer_norm2.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.12.layer_norm2.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.12.mlp.fc1.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.12.mlp.fc1.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.12.mlp.fc2.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.12.mlp.fc2.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.12.self_attn.k_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.12.self_attn.k_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.12.self_attn.out_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.12.self_attn.out_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.12.self_attn.q_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.12.self_attn.q_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.12.self_attn.v_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.12.self_attn.v_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.13.layer_norm1.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.13.layer_norm1.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.13.layer_norm2.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.13.layer_norm2.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.13.mlp.fc1.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.13.mlp.fc1.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.13.mlp.fc2.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.13.mlp.fc2.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.13.self_attn.k_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.13.self_attn.k_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.13.self_attn.out_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.13.self_attn.out_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.13.self_attn.q_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.13.self_attn.q_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.13.self_attn.v_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.13.self_attn.v_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.14.layer_norm1.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.14.layer_norm1.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.14.layer_norm2.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.14.layer_norm2.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.14.mlp.fc1.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.14.mlp.fc1.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.14.mlp.fc2.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.14.mlp.fc2.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.14.self_attn.k_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.14.self_attn.k_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.14.self_attn.out_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.14.self_attn.out_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.14.self_attn.q_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.14.self_attn.q_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.14.self_attn.v_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.14.self_attn.v_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.15.layer_norm1.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.15.layer_norm1.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.15.layer_norm2.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.15.layer_norm2.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.15.mlp.fc1.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.15.mlp.fc1.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.15.mlp.fc2.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.15.mlp.fc2.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.15.self_attn.k_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.15.self_attn.k_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.15.self_attn.out_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.15.self_attn.out_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.15.self_attn.q_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.15.self_attn.q_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.15.self_attn.v_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.15.self_attn.v_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.16.layer_norm1.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.16.layer_norm1.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.16.layer_norm2.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.16.layer_norm2.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.16.mlp.fc1.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.16.mlp.fc1.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.16.mlp.fc2.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.16.mlp.fc2.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.16.self_attn.k_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.16.self_attn.k_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.16.self_attn.out_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.16.self_attn.out_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.16.self_attn.q_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.16.self_attn.q_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.16.self_attn.v_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.16.self_attn.v_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.17.layer_norm1.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.17.layer_norm1.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.17.layer_norm2.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.17.layer_norm2.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.17.mlp.fc1.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.17.mlp.fc1.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.17.mlp.fc2.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.17.mlp.fc2.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.17.self_attn.k_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.17.self_attn.k_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.17.self_attn.out_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.17.self_attn.out_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.17.self_attn.q_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.17.self_attn.q_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.17.self_attn.v_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.17.self_attn.v_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.18.layer_norm1.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.18.layer_norm1.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.18.layer_norm2.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.18.layer_norm2.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.18.mlp.fc1.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.18.mlp.fc1.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.18.mlp.fc2.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.18.mlp.fc2.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.18.self_attn.k_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.18.self_attn.k_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.18.self_attn.out_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.18.self_attn.out_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.18.self_attn.q_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.18.self_attn.q_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.18.self_attn.v_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.18.self_attn.v_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.19.layer_norm1.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.19.layer_norm1.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.19.layer_norm2.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.19.layer_norm2.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.19.mlp.fc1.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.19.mlp.fc1.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.19.mlp.fc2.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.19.mlp.fc2.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.19.self_attn.k_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.19.self_attn.k_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.19.self_attn.out_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.19.self_attn.out_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.19.self_attn.q_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.19.self_attn.q_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.19.self_attn.v_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.19.self_attn.v_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.2.layer_norm1.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.2.layer_norm1.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.2.layer_norm2.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.2.layer_norm2.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.2.mlp.fc1.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.2.mlp.fc1.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.2.mlp.fc2.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.2.mlp.fc2.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.2.self_attn.k_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.2.self_attn.k_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.2.self_attn.out_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.2.self_attn.out_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.2.self_attn.q_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.2.self_attn.q_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.2.self_attn.v_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.2.self_attn.v_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.20.layer_norm1.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.20.layer_norm1.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.20.layer_norm2.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.20.layer_norm2.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.20.mlp.fc1.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.20.mlp.fc1.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.20.mlp.fc2.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.20.mlp.fc2.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.20.self_attn.k_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.20.self_attn.k_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.20.self_attn.out_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.20.self_attn.out_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.20.self_attn.q_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.20.self_attn.q_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.20.self_attn.v_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.20.self_attn.v_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.21.layer_norm1.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.21.layer_norm1.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.21.layer_norm2.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.21.layer_norm2.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.21.mlp.fc1.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.21.mlp.fc1.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.21.mlp.fc2.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.21.mlp.fc2.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.21.self_attn.k_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.21.self_attn.k_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.21.self_attn.out_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.21.self_attn.out_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.21.self_attn.q_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.21.self_attn.q_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.21.self_attn.v_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.21.self_attn.v_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.22.layer_norm1.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.22.layer_norm1.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.22.layer_norm2.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.22.layer_norm2.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.22.mlp.fc1.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.22.mlp.fc1.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.22.mlp.fc2.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.22.mlp.fc2.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.22.self_attn.k_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.22.self_attn.k_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.22.self_attn.out_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.22.self_attn.out_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.22.self_attn.q_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.22.self_attn.q_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.22.self_attn.v_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.22.self_attn.v_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.23.layer_norm1.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.23.layer_norm1.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.23.layer_norm2.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.23.layer_norm2.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.23.mlp.fc1.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.23.mlp.fc1.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.23.mlp.fc2.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.23.mlp.fc2.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.23.self_attn.k_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.23.self_attn.k_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.23.self_attn.out_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.23.self_attn.out_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.23.self_attn.q_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.23.self_attn.q_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.23.self_attn.v_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.23.self_attn.v_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.3.layer_norm1.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.3.layer_norm1.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.3.layer_norm2.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.3.layer_norm2.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.3.mlp.fc1.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.3.mlp.fc1.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.3.mlp.fc2.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.3.mlp.fc2.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.3.self_attn.k_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.3.self_attn.k_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.3.self_attn.out_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.3.self_attn.out_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.3.self_attn.q_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.3.self_attn.q_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.3.self_attn.v_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.3.self_attn.v_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.4.layer_norm1.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.4.layer_norm1.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.4.layer_norm2.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.4.layer_norm2.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.4.mlp.fc1.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.4.mlp.fc1.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.4.mlp.fc2.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.4.mlp.fc2.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.4.self_attn.k_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.4.self_attn.k_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.4.self_attn.out_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.4.self_attn.out_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.4.self_attn.q_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.4.self_attn.q_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.4.self_attn.v_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.4.self_attn.v_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.5.layer_norm1.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.5.layer_norm1.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.5.layer_norm2.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.5.layer_norm2.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.5.mlp.fc1.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.5.mlp.fc1.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.5.mlp.fc2.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.5.mlp.fc2.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.5.self_attn.k_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.5.self_attn.k_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.5.self_attn.out_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.5.self_attn.out_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.5.self_attn.q_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.5.self_attn.q_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.5.self_attn.v_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.5.self_attn.v_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.6.layer_norm1.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.6.layer_norm1.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.6.layer_norm2.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.6.layer_norm2.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.6.mlp.fc1.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.6.mlp.fc1.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.6.mlp.fc2.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.6.mlp.fc2.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.6.self_attn.k_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.6.self_attn.k_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.6.self_attn.out_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.6.self_attn.out_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.6.self_attn.q_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.6.self_attn.q_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.6.self_attn.v_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.6.self_attn.v_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.7.layer_norm1.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.7.layer_norm1.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.7.layer_norm2.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.7.layer_norm2.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.7.mlp.fc1.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.7.mlp.fc1.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.7.mlp.fc2.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.7.mlp.fc2.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.7.self_attn.k_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.7.self_attn.k_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.7.self_attn.out_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.7.self_attn.out_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.7.self_attn.q_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.7.self_attn.q_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.7.self_attn.v_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.7.self_attn.v_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.8.layer_norm1.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.8.layer_norm1.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.8.layer_norm2.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.8.layer_norm2.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.8.mlp.fc1.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.8.mlp.fc1.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.8.mlp.fc2.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.8.mlp.fc2.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.8.self_attn.k_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.8.self_attn.k_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.8.self_attn.out_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.8.self_attn.out_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.8.self_attn.q_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.8.self_attn.q_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.8.self_attn.v_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.8.self_attn.v_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.9.layer_norm1.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.9.layer_norm1.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.9.layer_norm2.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.9.layer_norm2.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.9.mlp.fc1.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.9.mlp.fc1.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.9.mlp.fc2.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.9.mlp.fc2.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.9.self_attn.k_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.9.self_attn.k_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.9.self_attn.out_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.9.self_attn.out_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.9.self_attn.q_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.9.self_attn.q_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.9.self_attn.v_proj.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.encoder.layers.9.self_attn.v_proj.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.post_layernorm.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.post_layernorm.weight": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.pre_layrnorm.bias": "pytorch_model-00004-of-00004.bin",
"vision_encoder.vision_model.pre_layrnorm.weight": "pytorch_model-00004-of-00004.bin"
}
}