{ "_name_or_path": "google/vit-base-patch16-224-in21k", "architectures": [ "ViTForImageClassification" ], "attention_probs_dropout_prob": 0.0, "encoder_stride": 16, "finetuning_task": "image-classification", "hidden_act": "gelu", "hidden_dropout_prob": 0.0, "hidden_size": 768, "id2label": { "0": "advertisement", "1": "budget", "10": "presentation", "11": "questionnaire", "12": "resume", "13": "scientific publication", "14": "scientific report", "15": "specification", "2": "email", "3": "file folder", "4": "form", "5": "handwritten", "6": "invoice", "7": "letter", "8": "memo", "9": "news article" }, "image_size": 224, "initializer_range": 0.02, "intermediate_size": 3072, "label2id": { "advertisement": "0", "budget": "1", "email": "2", "file folder": "3", "form": "4", "handwritten": "5", "invoice": "6", "letter": "7", "memo": "8", "news article": "9", "presentation": "10", "questionnaire": "11", "resume": "12", "scientific publication": "13", "scientific report": "14", "specification": "15" }, "layer_norm_eps": 1e-12, "model_type": "vit", "num_attention_heads": 12, "num_channels": 3, "num_hidden_layers": 12, "patch_size": 16, "problem_type": "single_label_classification", "qkv_bias": true, "torch_dtype": "float32", "transformers_version": "4.45.2" }