{
    "version": "0.9.0.dev2024020600",
    "pretrained_config": {
        "architecture": "PhiForCausalLM",
        "dtype": "float16",
        "logits_dtype": "float32",
        "vocab_size": 51200,
        "max_position_embeddings": 2048,
        "hidden_size": 2560,
        "num_hidden_layers": 32,
        "num_attention_heads": 32,
        "num_key_value_heads": 32,
        "head_size": 80,
        "hidden_act": "gelu_new",
        "intermediate_size": 10240,
        "norm_epsilon": 1e-05,
        "position_embedding_type": "learned_absolute",
        "use_prompt_tuning": false,
        "use_parallel_embedding": false,
        "embedding_sharding_dim": 0,
        "share_embedding_table": false,
        "mapping": {
            "world_size": 1,
            "tp_size": 1,
            "pp_size": 1
        },
        "kv_dtype": "float16",
        "max_lora_rank": 64,
        "partial_rotary_factor": 0.4,
        "rope_theta": 10000.0,
        "rotary_base": 10000.0,
        "quantization": {
            "quant_algo": null,
            "kv_cache_quant_algo": null,
            "group_size": 128,
            "has_zero_point": false,
            "pre_quant_scale": false,
            "exclude_modules": null,
            "sq_use_plugin": false
        }
    },
    "build_config": {
        "max_input_len": 1024,
        "max_output_len": 1024,
        "max_batch_size": 1,
        "max_beam_width": 1,
        "max_num_tokens": 1024,
        "max_prompt_embedding_table_size": 0,
        "gather_context_logits": false,
        "gather_generation_logits": false,
        "strongly_typed": false,
        "builder_opt": null,
        "profiling_verbosity": "layer_names_only",
        "plugin_config": {
            "bert_attention_plugin": "float16",
            "gpt_attention_plugin": "float16",
            "gemm_plugin": "float16",
            "smooth_quant_gemm_plugin": null,
            "identity_plugin": null,
            "layernorm_quantization_plugin": null,
            "rmsnorm_quantization_plugin": null,
            "nccl_plugin": null,
            "lookup_plugin": null,
            "lora_plugin": null,
            "weight_only_groupwise_quant_matmul_plugin": null,
            "weight_only_quant_matmul_plugin": null,
            "quantize_per_token_plugin": false,
            "quantize_tensor_plugin": false,
            "context_fmha": true,
            "context_fmha_fp32_acc": false,
            "paged_kv_cache": true,
            "remove_input_padding": true,
            "use_custom_all_reduce": true,
            "multi_block_mode": false,
            "enable_xqa": true,
            "attention_qk_half_accumulation": false,
            "tokens_per_block": 128,
            "use_paged_context_fmha": false,
            "use_context_fmha_for_generation": false
        }
    }
}