zeroshot committed on
Commit 7b6db61
1 Parent(s): 7ccb1c0

Delete config.json

Files changed (1)
  1. config.json +0 -114
config.json DELETED
@@ -1,114 +0,0 @@
-{
-    "version": "0.9.0.dev2024022700",
-    "pretrained_config": {
-        "architecture": "MistralForCausalLM",
-        "dtype": "float16",
-        "logits_dtype": "float32",
-        "vocab_size": 32000,
-        "max_position_embeddings": 32768,
-        "hidden_size": 4096,
-        "num_hidden_layers": 32,
-        "num_attention_heads": 32,
-        "num_key_value_heads": 8,
-        "head_size": 128,
-        "hidden_act": "silu",
-        "intermediate_size": 14336,
-        "norm_epsilon": 1e-05,
-        "position_embedding_type": "rope_gpt_neox",
-        "use_prompt_tuning": false,
-        "use_parallel_embedding": false,
-        "embedding_sharding_dim": 0,
-        "share_embedding_table": false,
-        "mapping": {
-            "world_size": 1,
-            "tp_size": 1,
-            "pp_size": 1
-        },
-        "kv_dtype": "float16",
-        "max_lora_rank": 64,
-        "rotary_base": 10000.0,
-        "rotary_scaling": null,
-        "moe_num_experts": 0,
-        "moe_top_k": 0,
-        "moe_tp_mode": 2,
-        "moe_normalization_mode": 1,
-        "enable_pos_shift": false,
-        "dense_context_fmha": false,
-        "lora_target_modules": null,
-        "hf_modules_to_trtllm_modules": {
-            "q_proj": "attn_q",
-            "k_proj": "attn_k",
-            "v_proj": "attn_v",
-            "o_proj": "attn_dense",
-            "gate_proj": "mlp_h_to_4h",
-            "down_proj": "mlp_4h_to_h",
-            "up_proj": "mlp_gate"
-        },
-        "trtllm_modules_to_hf_modules": {
-            "attn_q": "q_proj",
-            "attn_k": "k_proj",
-            "attn_v": "v_proj",
-            "attn_dense": "o_proj",
-            "mlp_h_to_4h": "gate_proj",
-            "mlp_4h_to_h": "down_proj",
-            "mlp_gate": "up_proj"
-        },
-        "disable_weight_only_quant_plugin": false,
-        "mlp_bias": false,
-        "attn_bias": false,
-        "quantization": {
-            "quant_algo": null,
-            "kv_cache_quant_algo": null,
-            "group_size": 128,
-            "has_zero_point": false,
-            "pre_quant_scale": false,
-            "exclude_modules": [
-                "lm_head"
-            ],
-            "sq_use_plugin": false
-        }
-    },
-    "build_config": {
-        "max_input_len": 32256,
-        "max_output_len": 1024,
-        "max_batch_size": 1,
-        "max_beam_width": 1,
-        "max_num_tokens": 32256,
-        "max_prompt_embedding_table_size": 0,
-        "gather_context_logits": false,
-        "gather_generation_logits": false,
-        "strongly_typed": false,
-        "builder_opt": null,
-        "profiling_verbosity": "layer_names_only",
-        "enable_debug_output": false,
-        "max_draft_len": 0,
-        "plugin_config": {
-            "bert_attention_plugin": "float16",
-            "gpt_attention_plugin": "float16",
-            "gemm_plugin": "float16",
-            "smooth_quant_gemm_plugin": null,
-            "identity_plugin": null,
-            "layernorm_quantization_plugin": null,
-            "rmsnorm_quantization_plugin": null,
-            "nccl_plugin": null,
-            "lookup_plugin": null,
-            "lora_plugin": null,
-            "weight_only_groupwise_quant_matmul_plugin": null,
-            "weight_only_quant_matmul_plugin": null,
-            "quantize_per_token_plugin": false,
-            "quantize_tensor_plugin": false,
-            "moe_plugin": "float16",
-            "context_fmha": true,
-            "context_fmha_fp32_acc": false,
-            "paged_kv_cache": true,
-            "remove_input_padding": true,
-            "use_custom_all_reduce": true,
-            "multi_block_mode": false,
-            "enable_xqa": true,
-            "attention_qk_half_accumulation": false,
-            "tokens_per_block": 128,
-            "use_paged_context_fmha": false,
-            "use_context_fmha_for_generation": false
-        }
-    }
-}
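
For reference, the deleted file follows the TensorRT-LLM engine config layout (the file itself reports version 0.9.0.dev2024022700). Below is a minimal, illustrative sketch, not part of this repo, that reads such a config.json with plain Python and prints the limits baked into the engine at build time; the field names are taken directly from the file shown above.

```python
import json

# Illustrative sketch only: inspect a TensorRT-LLM engine config.json
# like the one deleted in this commit and report its build limits.
with open("config.json") as f:
    cfg = json.load(f)

model = cfg["pretrained_config"]
build = cfg["build_config"]

print("architecture:  ", model["architecture"])    # MistralForCausalLM
print("dtype:         ", model["dtype"])           # float16
print("max_batch_size:", build["max_batch_size"])  # 1
print("max_input_len: ", build["max_input_len"])   # 32256
print("max_output_len:", build["max_output_len"])  # 1024
# Configured per-request budget: prompt tokens plus generated tokens.
print("prompt+gen budget:", build["max_input_len"] + build["max_output_len"])  # 33280
```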