{
  "_name_or_path": "MCG-NJU/videomae-base-short-ssv2",
  "architectures": [
    "VideoMAEForVideoClassification"
  ],
  "attention_probs_dropout_prob": 0.0,
  "decoder_hidden_size": 384,
  "decoder_intermediate_size": 1536,
  "decoder_num_attention_heads": 6,
  "decoder_num_hidden_layers": 4,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.0,
  "hidden_size": 768,
  "id2label": {
    "0": "\u0627\u0644\u0627\u0633\u0641\u0644_0.mp4",
    "1": "\u0627\u0644\u0627\u0639\u0644\u0649_0.mp4",
    "2": "\u0627\u0644\u0627\u0645\u0627\u0645_0.mp4",
    "3": "\u0627\u0644\u062e\u0644\u0641_0.mp4",
    "4": "\u0627\u0644\u064a\u0633\u0627\u0631_0.mp4",
    "5": "\u0627\u0644\u064a\u0645\u064a\u0646_0.mp4",
    "6": "\u062a\u062d\u062a_0.mp4",
    "7": "\u062c\u0627\u0646\u0628_0.mp4",
    "8": "\u062c\u0646\u0648\u0628_0.mp4",
    "9": "\u062d\u0648\u0644_0.mp4",
    "10": "\u0634\u0631\u0642_0.mp4",
    "11": "\u0634\u0645\u0627\u0644_0.mp4",
    "12": "\u0639\u0646\u062f_0.mp4",
    "13": "\u063a\u0631\u0628_0.mp4",
    "14": "\u0641\u0648\u0642_0.mp4",
    "15": "\u0642\u0645\u0629_0.mp4",
    "16": "\u0645\u0642\u0627\u0628\u0644_0.mp4"
  },
  "image_size": 224,
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "label2id": {
    "\u0627\u0644\u0627\u0633\u0641\u0644_0.mp4": 0,
    "\u0627\u0644\u0627\u0639\u0644\u0649_0.mp4": 1,
    "\u0627\u0644\u0627\u0645\u0627\u0645_0.mp4": 2,
    "\u0627\u0644\u062e\u0644\u0641_0.mp4": 3,
    "\u0627\u0644\u064a\u0633\u0627\u0631_0.mp4": 4,
    "\u0627\u0644\u064a\u0645\u064a\u0646_0.mp4": 5,
    "\u062a\u062d\u062a_0.mp4": 6,
    "\u062c\u0627\u0646\u0628_0.mp4": 7,
    "\u062c\u0646\u0648\u0628_0.mp4": 8,
    "\u062d\u0648\u0644_0.mp4": 9,
    "\u0634\u0631\u0642_0.mp4": 10,
    "\u0634\u0645\u0627\u0644_0.mp4": 11,
    "\u0639\u0646\u062f_0.mp4": 12,
    "\u063a\u0631\u0628_0.mp4": 13,
    "\u0641\u0648\u0642_0.mp4": 14,
    "\u0642\u0645\u0629_0.mp4": 15,
    "\u0645\u0642\u0627\u0628\u0644_0.mp4": 16
  },
  "layer_norm_eps": 1e-12,
  "model_type": "videomae",
  "norm_pix_loss": true,
  "num_attention_heads": 12,
  "num_channels": 3,
  "num_frames": 16,
  "num_hidden_layers": 12,
  "patch_size": 16,
  "problem_type": "single_label_classification",
  "qkv_bias": true,
  "torch_dtype": "float32",
  "transformers_version": "4.33.2",
  "tubelet_size": 2,
  "use_mean_pooling": false
}