{
  "_name_or_path": "nvidia/mit-b0",
  "architectures": [
    "SegformerForSemanticSegmentation"
  ],
  "attention_probs_dropout_prob": 0.0,
  "classifier_dropout_prob": 0.1,
  "decoder_hidden_size": 256,
  "depths": [
    2,
    2,
    2,
    2
  ],
  "downsampling_rates": [
    1,
    4,
    8,
    16
  ],
  "drop_path_rate": 0.1,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.0,
  "hidden_sizes": [
    32,
    64,
    160,
    256
  ],
  "id2label": {
    "0": "unlabeled",
    "1": "flat-road",
    "2": "flat-sidewalk",
    "3": "flat-crosswalk",
    "4": "flat-cyclinglane",
    "5": "flat-parkingdriveway",
    "6": "flat-railtrack",
    "7": "flat-curb",
    "8": "human-person",
    "9": "human-rider",
    "10": "vehicle-car",
    "11": "vehicle-truck",
    "12": "vehicle-bus",
    "13": "vehicle-tramtrain",
    "14": "vehicle-motorcycle",
    "15": "vehicle-bicycle",
    "16": "vehicle-caravan",
    "17": "vehicle-cartrailer",
    "18": "construction-building",
    "19": "construction-door",
    "20": "construction-wall",
    "21": "construction-fenceguardrail",
    "22": "construction-bridge",
    "23": "construction-tunnel",
    "24": "construction-stairs",
    "25": "object-pole",
    "26": "object-trafficsign",
    "27": "object-trafficlight",
    "28": "nature-vegetation",
    "29": "nature-terrain",
    "30": "sky",
    "31": "void-ground",
    "32": "void-dynamic",
    "33": "void-static",
    "34": "void-unclear"
  },
  "image_size": 224,
  "initializer_range": 0.02,
  "label2id": {
    "construction-bridge": 22,
    "construction-building": 18,
    "construction-door": 19,
    "construction-fenceguardrail": 21,
    "construction-stairs": 24,
    "construction-tunnel": 23,
    "construction-wall": 20,
    "flat-crosswalk": 3,
    "flat-curb": 7,
    "flat-cyclinglane": 4,
    "flat-parkingdriveway": 5,
    "flat-railtrack": 6,
    "flat-road": 1,
    "flat-sidewalk": 2,
    "human-person": 8,
    "human-rider": 9,
    "nature-terrain": 29,
    "nature-vegetation": 28,
    "object-pole": 25,
    "object-trafficlight": 27,
    "object-trafficsign": 26,
    "sky": 30,
    "unlabeled": 0,
    "vehicle-bicycle": 15,
    "vehicle-bus": 12,
    "vehicle-car": 10,
    "vehicle-caravan": 16,
    "vehicle-cartrailer": 17,
    "vehicle-motorcycle": 14,
    "vehicle-tramtrain": 13,
    "vehicle-truck": 11,
    "void-dynamic": 32,
    "void-ground": 31,
    "void-static": 33,
    "void-unclear": 34
  },
  "layer_norm_eps": 1e-06,
  "mlp_ratios": [
    4,
    4,
    4,
    4
  ],
  "model_type": "segformer",
  "num_attention_heads": [
    1,
    2,
    5,
    8
  ],
  "num_channels": 3,
  "num_encoder_blocks": 4,
  "patch_sizes": [
    7,
    3,
    3,
    3
  ],
  "reshape_last_stage": true,
  "semantic_loss_ignore_index": 255,
  "sr_ratios": [
    8,
    4,
    2,
    1
  ],
  "strides": [
    4,
    2,
    2,
    2
  ],
  "torch_dtype": "float32",
  "transformers_version": "4.19.2"
}
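
A minimal usage sketch (not part of the config file itself): the config above describes a SegFormer (MiT-B0 backbone) semantic-segmentation model with 35 classes, and a checkpoint carrying this config would typically be consumed through the Hugging Face transformers API as below. The repo id "malra/segformer-b0-sidewalk" and the image path are placeholders for illustration; substitute the actual model repository and input image. SegformerImageProcessor assumes a transformers release newer than the 4.19.2 recorded in the config (older releases expose the same preprocessing as SegformerFeatureExtractor).

# usage_sketch.py -- assumed repo id and image path, for illustration only
from PIL import Image
import torch
from transformers import SegformerImageProcessor, SegformerForSemanticSegmentation

repo_id = "malra/segformer-b0-sidewalk"  # placeholder repo id
processor = SegformerImageProcessor.from_pretrained(repo_id)
model = SegformerForSemanticSegmentation.from_pretrained(repo_id)
model.eval()

image = Image.open("street_scene.jpg").convert("RGB")  # placeholder image
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    # Logits come out at 1/4 of the input resolution: (1, 35, H/4, W/4)
    logits = model(**inputs).logits

# Upsample to the original image size and take the per-pixel argmax.
upsampled = torch.nn.functional.interpolate(
    logits, size=image.size[::-1], mode="bilinear", align_corners=False
)
pred = upsampled.argmax(dim=1)[0]

# id2label from the config turns class indices back into label names.
print(model.config.id2label[int(pred[0, 0])])  # label of the top-left pixel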