{ "_name_or_path": "THUDM/chatglm2-6b", "add_bias_linear": false, "add_qkv_bias": true, "apply_query_key_layer_scaling": true, "apply_residual_connection_post_layernorm": false, "architectures": [ "ChatGLMForConditionalGeneration" ], "attention_dropout": 0.0, "attention_softmax_in_fp32": true, "auto_map": { "AutoConfig": "THUDM/chatglm2-6b--configuration_chatglm.ChatGLMConfig", "AutoModel": "THUDM/chatglm2-6b--modeling_chatglm.ChatGLMForConditionalGeneration", "AutoModelForCausalLM": "THUDM/chatglm2-6b--modeling_chatglm.ChatGLMForConditionalGeneration", "AutoModelForSeq2SeqLM": "THUDM/chatglm2-6b--modeling_chatglm.ChatGLMForConditionalGeneration", "AutoModelForSequenceClassification": "THUDM/chatglm2-6b--modeling_chatglm.ChatGLMForSequenceClassification" }, "bias_dropout_fusion": true, "classifier_dropout": null, "eos_token_id": 2, "ffn_hidden_size": 13696, "fp32_residual_connection": false, "hidden_dropout": 0.0, "hidden_size": 4096, "kv_channels": 128, "layernorm_epsilon": 1e-05, "model_type": "chatglm", "multi_query_attention": true, "multi_query_group_num": 2, "num_attention_heads": 32, "num_layers": 28, "original_rope": true, "pad_token_id": 0, "padded_vocab_size": 65024, "post_layer_norm": true, "pre_seq_len": null, "prefix_projection": false, "quantization_bit": 0, "rmsnorm": true, "seq_length": 32768, "tie_word_embeddings": false, "torch_dtype": "float16", "transformers_version": "4.34.0", "use_cache": true, "vocab_size": 65024 }