{ "_name_or_path": "/monster/data/model/glm-4-9b-chat", "add_bias_linear": false, "add_qkv_bias": true, "apply_query_key_layer_scaling": true, "apply_residual_connection_post_layernorm": false, "architectures": [ "ChatGLMModel" ], "attention_dropout": 0.0, "attention_softmax_in_fp32": true, "auto_map": { "AutoConfig": "configuration_chatglm.ChatGLMConfig", "AutoGPTQForCausalLM": "modeling_chatglm.ChatGLMForConditionalGeneration", "AutoModel": "modeling_chatglm.ChatGLMForConditionalGeneration", "AutoModelForCausalLM": "modeling_chatglm.ChatGLMForConditionalGeneration", "AutoModelForSeq2SeqLM": "modeling_chatglm.ChatGLMForConditionalGeneration", "AutoModelForSequenceClassification": "modeling_chatglm.ChatGLMForSequenceClassification" }, "bias_dropout_fusion": true, "classifier_dropout": null, "eos_token_id": [ 151329, 151336, 151338 ], "ffn_hidden_size": 13696, "fp32_residual_connection": false, "hidden_dropout": 0.0, "hidden_size": 4096, "kv_channels": 128, "layernorm_epsilon": 1.5625e-07, "model_type": "chatglm", "multi_query_attention": true, "multi_query_group_num": 2, "num_attention_heads": 32, "num_hidden_layers": 40, "num_layers": 40, "original_rope": true, "pad_token_id": 151329, "padded_vocab_size": 151552, "post_layer_norm": true, "quantization_config": { "bits": 4, "checkpoint_format": "gptq", "damp_percent": 0.005, "desc_act": false, "group_size": 128, "lm_head": false, "meta": { "quantizer": "autogptq:0.8.0.dev1" }, "model_file_base_name": null, "model_name_or_path": null, "quant_method": "gptq", "static_groups": false, "sym": true, "true_sequential": true }, "rmsnorm": true, "rope_ratio": 500, "seq_length": 131072, "tie_word_embeddings": false, "torch_dtype": "float16", "transformers_version": "4.41.2", "use_cache": true, "vocab_size": 151552 }