alexwww94 committed on
Commit
8a737da
1 Parent(s): 829e22f

Upload folder using huggingface_hub

config.json ADDED
@@ -0,0 +1,80 @@
+ {
+   "_name_or_path": "THUDM/glm-4v-9b",
+   "add_bias_linear": false,
+   "add_qkv_bias": true,
+   "apply_query_key_layer_scaling": true,
+   "apply_residual_connection_post_layernorm": false,
+   "architectures": [
+     "ChatGLMModel"
+   ],
+   "attention_dropout": 0.0,
+   "attention_softmax_in_fp32": true,
+   "auto_map": {
+     "AutoConfig": "THUDM/glm-4v-9b--configuration_chatglm.ChatGLMConfig",
+     "AutoModel": "THUDM/glm-4v-9b--modeling_chatglm.ChatGLMForConditionalGeneration",
+     "AutoModelForCausalLM": "THUDM/glm-4v-9b--modeling_chatglm.ChatGLMForConditionalGeneration",
+     "AutoModelForSeq2SeqLM": "THUDM/glm-4v-9b--modeling_chatglm.ChatGLMForConditionalGeneration",
+     "AutoModelForSequenceClassification": "THUDM/glm-4v-9b--modeling_chatglm.ChatGLMForSequenceClassification"
+   },
+   "bias_dropout_fusion": true,
+   "boi_token_id": 151339,
+   "classifier_dropout": null,
+   "eoi_token_id": 151340,
+   "eos_token_id": [
+     151329,
+     151336,
+     151338
+   ],
+   "ffn_hidden_size": 13696,
+   "fp32_residual_connection": false,
+   "hidden_dropout": 0.0,
+   "hidden_size": 4096,
+   "kv_channels": 128,
+   "layernorm_epsilon": 1.5625e-07,
+   "model_type": "chatglm",
+   "multi_query_attention": true,
+   "multi_query_group_num": 2,
+   "num_attention_heads": 32,
+   "num_layers": 40,
+   "original_rope": true,
+   "pad_token_id": 151329,
+   "padded_vocab_size": 151552,
+   "post_layer_norm": true,
+   "pre_seq_len": null,
+   "prefix_projection": false,
+   "quantization_config": {
+     "bits": 4,
+     "checkpoint_format": "gptq",
+     "damp_percent": 0.01,
+     "desc_act": false,
+     "group_size": 128,
+     "model_file_base_name": null,
+     "model_name_or_path": null,
+     "quant_method": "gptq",
+     "static_groups": false,
+     "sym": true,
+     "true_sequential": true
+   },
+   "rmsnorm": true,
+   "rope_ratio": 1,
+   "seq_length": 8192,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.43.4",
+   "use_cache": false,
+   "vision_config": {
+     "dropout_prob": 0.0,
+     "hidden_act": "gelu",
+     "hidden_size": 1792,
+     "image_size": 1120,
+     "in_channels": 3,
+     "intermediate_size": 15360,
+     "layer_norm_eps": 1e-06,
+     "num_heads": 16,
+     "num_hidden_layers": 63,
+     "num_positions": 6401,
+     "patch_size": 14,
+     "scaling_factor": 8
+   },
+   "vocab_size": 151552
+ }
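Because a "quantization_config" block is embedded in config.json and "auto_map" points at the custom ChatGLM modeling code, this checkpoint is normally loaded through transformers with trust_remote_code enabled and a GPTQ backend (auto-gptq or gptqmodel via optimum) installed. A minimal loading sketch follows; the repo id is a hypothetical placeholder for wherever this upload is published, and the exact loading path for custom-code GPTQ checkpoints may differ.

```python
# Minimal loading sketch (assumptions: hypothetical repo id, a CUDA GPU with
# enough VRAM, and auto-gptq/gptqmodel + optimum installed so transformers can
# honor the "quantization_config" embedded in config.json).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "alexwww94/glm-4v-9b-gptq-4bit"  # hypothetical repo id for this upload

tokenizer = AutoTokenizer.from_pretrained(repo_id, trust_remote_code=True)

# trust_remote_code=True is required because auto_map resolves to the custom
# ChatGLM configuration/modeling files shipped with THUDM/glm-4v-9b.
model = AutoModelForCausalLM.from_pretrained(
    repo_id,
    torch_dtype=torch.bfloat16,  # matches "torch_dtype" in config.json
    device_map="auto",
    trust_remote_code=True,
)
model.eval()
```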
gptq_model-4bit-128g.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b5cb1583ef07d050192d3703cc1a48e9e2bea64a9c4ede1bd61236c6a6e052e2
+ size 9138334800
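This file is a Git LFS pointer: the actual ~9.1 GB weight file is stored in LFS and identified by the sha256 oid and byte size above. A small sketch for checking a downloaded copy against the pointer (the local file path is an assumption):

```python
# Verify a locally downloaded gptq_model-4bit-128g.safetensors against the
# oid/size recorded in the LFS pointer above.
import hashlib
import os

path = "gptq_model-4bit-128g.safetensors"  # assumed local path
expected_sha256 = "b5cb1583ef07d050192d3703cc1a48e9e2bea64a9c4ede1bd61236c6a6e052e2"
expected_size = 9138334800

assert os.path.getsize(path) == expected_size, "size mismatch"

h = hashlib.sha256()
with open(path, "rb") as f:
    # Hash in 1 MiB chunks to avoid loading the whole 9 GB file into memory.
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

assert h.hexdigest() == expected_sha256, "sha256 mismatch"
print("safetensors file matches the LFS pointer")
```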
quantize_config.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "bits": 4,
+   "group_size": 128,
+   "damp_percent": 0.01,
+   "desc_act": false,
+   "static_groups": false,
+   "sym": true,
+   "true_sequential": true,
+   "model_name_or_path": null,
+   "model_file_base_name": null,
+   "quant_method": "gptq",
+   "checkpoint_format": "gptq"
+ }
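quantize_config.json mirrors the "quantization_config" block in config.json and is the file AutoGPTQ-style loaders read. As a sketch of what these fields correspond to, the same settings expressed as an auto_gptq BaseQuantizeConfig are shown below; reproducing the quantization would additionally require the base model and a calibration dataset, which are not part of this commit and are left as assumptions.

```python
# The quantize_config.json fields expressed as an AutoGPTQ BaseQuantizeConfig
# (auto-gptq must be installed; this only constructs the config object).
from auto_gptq import BaseQuantizeConfig

quantize_config = BaseQuantizeConfig(
    bits=4,                # 4-bit weight quantization
    group_size=128,        # one scale/zero-point group per 128 weights
    damp_percent=0.01,     # Hessian dampening used by the GPTQ solver
    desc_act=False,        # no act-order (activation-order) reordering
    static_groups=False,
    sym=True,              # symmetric quantization
    true_sequential=True,  # quantize sublayers sequentially within each block
)
```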