bridgetower-base / preprocessor_config.json
{
  "cache_dir": "/tmp",
  "downstream_fusion": false,
  "downstream_fusion_layers": 1,
  "downstream_fusion_method": "elmo",
  "drop_rate": 0.1,
  "freeze_RoBERTa": false,
  "freeze_ViT": false,
  "freeze_layer_count_roberta": false,
  "freeze_layer_count_vit": false,
  "head_hidden_scale": 2,
  "hidden_size": 768,
  "image_size": 288,
  "input_text_embed_size": 768,
  "link_tower_shared": false,
  "link_tower_type": "add",
  "log_dir": "log_dir",
  "loss_names": {
    "contras": 0,
    "irtr": 0,
    "itm": 0,
    "mlm": 0,
    "mpp": 0,
    "nlvr2": 0,
    "snli": 0,
    "vcr": 0,
    "vcr_qar": 0,
    "vqa": 1
  },
  "max_text_len": 50,
  "mlp_ratio": 4,
  "model_type": "bridgetower",
  "num_heads": 12,
  "num_layers": 6,
  "num_nodes": 1,
  "only_load_cross_modal_from_meter": false,
  "patch_size": 16,
  "resolution_before": 224,
  "stop_gradient": false,
  "task_head_layers": 2,
  "test_only": false,
  "tokenizer": "roberta-base",
  "unfreeze_RoBERTa_attention": false,
  "unfreeze_RoBERTa_embeddings": false,
  "unfreeze_RoBERTa_encoder": false,
  "unfreeze_RoBERTa_layernorm": false,
  "unfreeze_ViT_attention": false,
  "unfreeze_ViT_layernorm": false,
  "vit": "ViT-B/16",
  "vit_layernorm_init_from_vit": false,
  "vit_layernorm_shared": true,
  "vit_remove_last": false,
  "vocab_size": 50265
}
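
For context, a minimal sketch of how a preprocessor config like this is consumed at inference time, assuming the file lives in the BridgeTower/bridgetower-base repository on the Hub (the repo id is an assumption) and a Transformers version that ships the BridgeTower integration (v4.26+). Per the values above, images are resized according to image_size (288) before being split into patch_size (16) patches, and text is tokenized with the roberta-base tokenizer:

from PIL import Image
import requests
from transformers import BridgeTowerProcessor

# Hypothetical repo id -- substitute the actual Hub path of this file.
processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# With image_size 288 and patch_size 16, the ViT tower sees
# (288 / 16)^2 = 324 patches per image. Text goes through the
# roberta-base tokenizer (vocab_size 50265).
encoding = processor(images=image, text="two cats sleeping on a couch",
                     return_tensors="pt")
print(encoding.keys())  # pixel_values, input_ids, attention_mask, ...

Note also that loss_names enables only the vqa objective (set to 1, all others 0), which suggests this config was exported from a VQA fine-tuning run of the original BridgeTower training code.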