name: flexTokenizer
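# embedding_dim is the latent (pre-quantization) channel width; hidden_channels is the
# base width of the conv backbone (interpretation assumed from common VQ tokenizer configs).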
embedding_dim: 64
hidden_channels: 128
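# Per-stage width multipliers: stage i presumably uses hidden_channels * multiplier[i]
# channels (assumed convention; the exact mapping depends on the model code).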
channel_multipliers:
  - 1
  - 2
  - 2
  - 4
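# Blocks per encoder stage; the fifth entry presumably belongs to a middle/bottleneck
# stage beyond the four multiplier stages (assumption, not verified against the code).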
encoder_layer_configs:
  - 2
  - 2
  - 2
  - 2
  - 2
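# Blocks per decoder stage (mirrors the encoder counts here).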
decoder_layer_configs:
  - 2
  - 2
  - 2
  - 2
  - 2
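# Architectural switches: adaptive normalization layers and learned (rather than fixed)
# up/down-sampling; the names suggest this, but exact behavior depends on the model code.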
use_adaptive_norm: true
use_learnable_up_down_sample: true
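# Grouped vector quantization (gvq). With embed_dim 64 and num_group 16, each group
# presumably quantizes a 4-dim sub-vector against an 8192-entry codebook; codes are
# L2-normalized and the commitment loss uses the standard 0.25 weight (field meanings
# assumed from common VQ-VAE conventions, not verified against the model code).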
quantizer_config:
  quantize_type: gvq
  embed_dim: 64
  num_embed: 8192
  commitment_loss_weight: 0.25
  use_l2_norm: true
  use_uniform_init: false
  num_group: 16