{
  "architectures": [
    "Qwen2VisionTransformerPretrainedModel"
  ],
  "depth": 32,
  "embed_dim": 1280,
  "hidden_act": "quick_gelu",
  "hidden_size": 3584,
  "in_channels": 3,
  "in_chans": 3,
  "mlp_ratio": 4,
  "model_type": "qwen2_vl",
  "num_heads": 16,
  "patch_size": 14,
  "spatial_merge_size": 2,
  "spatial_patch_size": 14,
  "temporal_patch_size": 2,
  "torch_dtype": "float32",
  "transformers_version": "4.47.0.dev0"
}