{ "_name_or_path": "openai/clip-vit-base-patch32", "architectures": [ "CLIPModel" ], "model_type": "clip", "projection_dim": 512, "vision_config": { "timm_model_name": "vit_base_patch16_224", "timm_model_type": "clip_vision_model", "timm_model_pretrained": false, "timm_pool": "", "timm_proj": "linear", "image_size": 224 }, "text_config": { "hf_model_name": "microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract", "hf_tokenizer_name": "microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract", "proj": "mlp", "pooler_type": "cls_last_hidden_state_pooler", "context_length": 256 } , "preprocess_cfg": { "mean": [ 0.48145466, 0.4578275, 0.40821073 ], "std": [ 0.26862954, 0.26130258, 0.27577711 ] } }