mmlm-fixed-10k / config.json
{
"architectures": [
"MMLM"
],
"codebook_size": 2048,
"lm_model_name": "voidful/Llama-3.2-8B-Whisper",
"model_type": "mmlm",
"num_heads": 8,
"queue_duration": 3600,
"queue_length": 86400000,
"sampling_rate": 24000,
"speaker_emb_dim": 192,
"step_duration": 0.08,
"step_size": 1920,
"torch_dtype": "bfloat16",
"transformers_version": "4.47.0"
}
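
A minimal sketch of loading this config with transformers, assuming the repo id is voidful/mmlm-fixed-10k (inferred from the page title) and that the custom "mmlm" model type ships its configuration code on the Hub, hence trust_remote_code=True. The asserts check the arithmetic relationships implied by the fields above: step_size is one step's worth of audio samples, and queue_length is the queue's full duration in samples.

from transformers import AutoConfig

# Hypothetical repo id inferred from the page title; the custom
# model_type "mmlm" is not built into transformers, so remote code
# from the Hub must be trusted to resolve the config class.
config = AutoConfig.from_pretrained(
    "voidful/mmlm-fixed-10k",
    trust_remote_code=True,
)

# Each step covers step_duration seconds of audio at sampling_rate Hz:
# 24000 Hz * 0.08 s = 1920 samples per step.
assert config.step_size == round(config.sampling_rate * config.step_duration)

# The queue holds queue_duration seconds of samples:
# 3600 s * 24000 Hz = 86,400,000 samples.
assert config.queue_length == config.queue_duration * config.sampling_rate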