lrl-modelcloud committed on
Commit
e16dec6
1 Parent(s): 29aa9d5

2306264a33ab13473de310a038882d2112583d7a02b122b5b78f5db3234d0f1b

added_tokens.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "</tool_call>": 151658,
+   "<tool_call>": 151657,
+   "<|box_end|>": 151649,
+   "<|box_start|>": 151648,
+   "<|endoftext|>": 151643,
+   "<|file_sep|>": 151664,
+   "<|fim_middle|>": 151660,
+   "<|fim_pad|>": 151662,
+   "<|fim_prefix|>": 151659,
+   "<|fim_suffix|>": 151661,
+   "<|im_end|>": 151645,
+   "<|im_start|>": 151644,
+   "<|image_pad|>": 151655,
+   "<|object_ref_end|>": 151647,
+   "<|object_ref_start|>": 151646,
+   "<|quad_end|>": 151651,
+   "<|quad_start|>": 151650,
+   "<|repo_name|>": 151663,
+   "<|video_pad|>": 151656,
+   "<|vision_end|>": 151653,
+   "<|vision_pad|>": 151654,
+   "<|vision_start|>": 151652
+ }
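
added_tokens.json pins each special token (chat markers, FIM markers, vision and tool-call delimiters) to a fixed id above the base vocabulary. A minimal sketch of checking these ids after download; the repo id below is a placeholder, not the actual Hub path of this repository:

from transformers import AutoTokenizer

# Placeholder repo id, substitute the real Hub path of this repository.
tok = AutoTokenizer.from_pretrained("your-org/QwQ-32B-Preview-gptq-4bit")

# get_added_vocab() returns exactly the token -> id map that
# added_tokens.json defines.
added = tok.get_added_vocab()
assert added["<|im_end|>"] == 151645  # same id as eos_token_id in config.json below
print(sorted(added.items(), key=lambda kv: kv[1]))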
config.json ADDED
@@ -0,0 +1,51 @@
+ {
+   "_attn_implementation_autoset": true,
+   "_name_or_path": "/data/QwQ-32B-Preview",
+   "architectures": [
+     "Qwen2ForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 151643,
+   "eos_token_id": 151645,
+   "hidden_act": "silu",
+   "hidden_size": 5120,
+   "initializer_range": 0.02,
+   "intermediate_size": 27648,
+   "max_position_embeddings": 32768,
+   "max_window_layers": 64,
+   "model_type": "qwen2",
+   "num_attention_heads": 40,
+   "num_hidden_layers": 64,
+   "num_key_value_heads": 8,
+   "quantization_config": {
+     "bits": 4,
+     "checkpoint_format": "gptq",
+     "desc_act": true,
+     "dynamic": null,
+     "group_size": 32,
+     "lm_head": false,
+     "meta": {
+       "damp_auto_increment": 0.0015,
+       "damp_percent": 0.1,
+       "quantizer": [
+         "gptqmodel:1.4.4"
+       ],
+       "static_groups": false,
+       "true_sequential": true,
+       "uri": "https://github.com/modelcloud/gptqmodel"
+     },
+     "quant_method": "gptq",
+     "sym": true,
+     "true_sequential": true
+   },
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": null,
+   "rope_theta": 1000000.0,
+   "sliding_window": null,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.46.3",
+   "use_cache": false,
+   "use_sliding_window": false,
+   "vocab_size": 152064
+ }
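
config.json describes the base model (Qwen2ForCausalLM: 64 layers, hidden size 5120, grouped-query attention with 8 KV heads) and embeds the GPTQ parameters: 4-bit symmetric weights, group size 32, act-order (desc_act) enabled, produced by gptqmodel 1.4.4. A minimal loading sketch, assuming a placeholder repo id and that a GPTQ kernel backend (e.g. gptqmodel, or optimum's GPTQ support) is installed; transformers reads quantization_config from the checkpoint itself, so no extra quantization arguments are passed:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "your-org/QwQ-32B-Preview-gptq-4bit"  # placeholder repo id
tok = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLM.from_pretrained(
    repo,
    device_map="auto",           # shard across available GPUs
    torch_dtype=torch.bfloat16,  # matches torch_dtype in config.json
)

inputs = tok("Hello", return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=20)
print(tok.decode(out[0], skip_special_tokens=True))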
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model-00001-of-00067.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0f147f801f9428896e103d67ad61db75a882799b4ab1d94bb7c5e88b33a4e6e9
+ size 1557135504
model-00002-of-00067.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d3dc344c7ff2ff9bbf46bc0f75248e9c192fecd7c0e485fc0b5deb7682d24e2a
+ size 10360
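
Both .safetensors entries are Git LFS pointer files: the repository stores only the LFS spec version, the sha256 oid of the actual blob, and its byte size (about 1.5 GB for the first of the 67 shards). A stdlib-only sketch for verifying a downloaded shard against its pointer; the local file path is an assumption:

import hashlib

def verify_lfs_object(path: str, expected_oid: str, expected_size: int) -> bool:
    """Hash the file in chunks and compare against the pointer's oid and size."""
    h = hashlib.sha256()
    n = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
            n += len(chunk)
    return h.hexdigest() == expected_oid and n == expected_size

# Values copied from the pointer for model-00001-of-00067.safetensors above.
print(verify_lfs_object(
    "model-00001-of-00067.safetensors",  # assumed local path
    "0f147f801f9428896e103d67ad61db75a882799b4ab1d94bb7c5e88b33a4e6e9",
    1557135504,
))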