{
  "_class_name": "AutoencoderKL",
  "_commit_hash": null,
  "_diffusers_version": "0.29.1",
  "_name_or_path": "/home/ubuntu/.cache/huggingface/hub/models--hf-internal-testing--tiny-stable-diffusion-torch/snapshots/a88cdfbd91f96ec7f61eb7484b652ff0f4ee701d/vae",
  "_use_default_values": [
    "scaling_factor",
    "use_post_quant_conv",
    "force_upcast",
    "latents_mean",
    "latents_std",
    "shift_factor",
    "use_quant_conv"
  ],
  "act_fn": "silu",
  "block_out_channels": [
    32,
    64
  ],
  "down_block_types": [
    "DownEncoderBlock2D",
    "DownEncoderBlock2D"
  ],
  "force_upcast": true,
  "in_channels": 3,
  "latent_channels": 4,
  "latents_mean": null,
  "latents_std": null,
  "layers_per_block": 1,
  "neuron": {
    "auto_cast": "matmul",
    "auto_cast_type": "bf16",
    "compiler_type": "neuronx-cc",
    "compiler_version": "2.13.66.0+6dfecc895",
    "dynamic_batch_size": false,
    "inline_weights_to_neff": true,
    "input_names": [
      "sample"
    ],
    "model_type": "vae-encoder",
    "optlevel": "2",
    "output_attentions": false,
    "output_hidden_states": false,
    "output_names": [
      "latent_sample"
    ],
    "static_batch_size": 1,
    "static_height": 64,
    "static_num_channels": 3,
    "static_width": 64
  },
  "norm_num_groups": 32,
  "out_channels": 3,
  "sample_size": 128,
  "scaling_factor": 0.18215,
  "shift_factor": null,
  "task": "semantic-segmentation",
  "transformers_version": null,
  "up_block_types": [
    "UpDecoderBlock2D",
    "UpDecoderBlock2D"
  ],
  "use_post_quant_conv": true,
  "use_quant_conv": true
}
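
For context, a minimal sketch of how a config like this can be inspected and turned back into an uncompiled diffusers model. The filename "config.json" is an assumption for the example; the "neuron" block holds export metadata (static input shapes, compiler settings) written at compilation time and is not an AutoencoderKL hyperparameter, so diffusers ignores it.

# A hedged sketch, not the canonical loading path for the compiled artifact:
# AutoencoderKL.from_config() only rebuilds the architecture described by these
# hyperparameters, with randomly initialized weights. The compiled Neuron model
# itself is loaded separately (e.g. via optimum-neuron).
import json

from diffusers import AutoencoderKL

with open("config.json") as f:  # path assumed for this example
    config = json.load(f)

# Compilation metadata recorded under the "neuron" key.
neuron_meta = config["neuron"]
print(neuron_meta["static_height"], neuron_meta["static_width"])  # 64 64

# Rebuild an uncompiled AutoencoderKL with the same architecture; keys the class
# does not recognize ("neuron", "task", ...) are ignored with a warning.
vae = AutoencoderKL.from_config(config)
print(vae.config.block_out_channels)  # [32, 64]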