legraphista committed
Commit: bf61baf
Parent(s): 48d4f98

Upload imatrix.log with huggingface_hub

Files changed: imatrix.log (+154 -0)
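The commit message notes the log was pushed with huggingface_hub. A minimal sketch of such an upload is below; the repo id and local path are assumptions inferred from the paths inside the log, not taken from the commit itself, and authentication is left to the default token resolution.

```python
from huggingface_hub import HfApi

# Hypothetical upload of the log file to the model repo.
# repo_id and the local path are illustrative assumptions.
api = HfApi()
api.upload_file(
    path_or_fileobj="Yi-Coder-1.5B-IMat-GGUF/imatrix.log",
    path_in_repo="imatrix.log",
    repo_id="legraphista/Yi-Coder-1.5B-IMat-GGUF",
)
```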
imatrix.log (ADDED)
@@ -0,0 +1,154 @@
llama_model_loader: loaded meta data with 29 key-value pairs and 219 tensors from Yi-Coder-1.5B-IMat-GGUF/Yi-Coder-1.5B.Q8_0.gguf.hardlink.gguf (version GGUF V3 (latest))
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv 0: general.architecture str = llama
llama_model_loader: - kv 1: general.type str = model
llama_model_loader: - kv 2: general.name str = Yi Coder 1.5B
llama_model_loader: - kv 3: general.basename str = Yi-Coder
llama_model_loader: - kv 4: general.size_label str = 1.5B
llama_model_loader: - kv 5: general.license str = apache-2.0
llama_model_loader: - kv 6: llama.block_count u32 = 24
llama_model_loader: - kv 7: llama.context_length u32 = 131072
llama_model_loader: - kv 8: llama.embedding_length u32 = 2048
llama_model_loader: - kv 9: llama.feed_forward_length u32 = 5504
llama_model_loader: - kv 10: llama.attention.head_count u32 = 16
llama_model_loader: - kv 11: llama.attention.head_count_kv u32 = 16
llama_model_loader: - kv 12: llama.rope.freq_base f32 = 10000000.000000
llama_model_loader: - kv 13: llama.attention.layer_norm_rms_epsilon f32 = 0.000010
llama_model_loader: - kv 14: general.file_type u32 = 7
llama_model_loader: - kv 15: llama.vocab_size u32 = 64000
llama_model_loader: - kv 16: llama.rope.dimension_count u32 = 128
llama_model_loader: - kv 17: tokenizer.ggml.model str = llama
llama_model_loader: - kv 18: tokenizer.ggml.pre str = default
llama_model_loader: - kv 19: tokenizer.ggml.tokens arr[str,64000] = ["<unk>", "<|startoftext|>", "<|endof...
llama_model_loader: - kv 20: tokenizer.ggml.scores arr[f32,64000] = [-1000.000000, -1000.000000, -1000.00...
llama_model_loader: - kv 21: tokenizer.ggml.token_type arr[i32,64000] = [3, 3, 3, 3, 3, 3, 1, 1, 1, 3, 3, 3, ...
llama_model_loader: - kv 22: tokenizer.ggml.bos_token_id u32 = 1
llama_model_loader: - kv 23: tokenizer.ggml.eos_token_id u32 = 2
llama_model_loader: - kv 24: tokenizer.ggml.unknown_token_id u32 = 0
llama_model_loader: - kv 25: tokenizer.ggml.padding_token_id u32 = 0
llama_model_loader: - kv 26: tokenizer.ggml.add_bos_token bool = false
llama_model_loader: - kv 27: tokenizer.ggml.add_eos_token bool = false
llama_model_loader: - kv 28: general.quantization_version u32 = 2
llama_model_loader: - type f32: 49 tensors
llama_model_loader: - type q8_0: 170 tensors
llm_load_vocab: special tokens cache size = 11
llm_load_vocab: token to piece cache size = 0.3834 MB
llm_load_print_meta: format = GGUF V3 (latest)
llm_load_print_meta: arch = llama
llm_load_print_meta: vocab type = SPM
llm_load_print_meta: n_vocab = 64000
llm_load_print_meta: n_merges = 0
llm_load_print_meta: vocab_only = 0
llm_load_print_meta: n_ctx_train = 131072
llm_load_print_meta: n_embd = 2048
llm_load_print_meta: n_layer = 24
llm_load_print_meta: n_head = 16
llm_load_print_meta: n_head_kv = 16
llm_load_print_meta: n_rot = 128
llm_load_print_meta: n_swa = 0
llm_load_print_meta: n_embd_head_k = 128
llm_load_print_meta: n_embd_head_v = 128
llm_load_print_meta: n_gqa = 1
llm_load_print_meta: n_embd_k_gqa = 2048
llm_load_print_meta: n_embd_v_gqa = 2048
llm_load_print_meta: f_norm_eps = 0.0e+00
llm_load_print_meta: f_norm_rms_eps = 1.0e-05
llm_load_print_meta: f_clamp_kqv = 0.0e+00
llm_load_print_meta: f_max_alibi_bias = 0.0e+00
llm_load_print_meta: f_logit_scale = 0.0e+00
llm_load_print_meta: n_ff = 5504
llm_load_print_meta: n_expert = 0
llm_load_print_meta: n_expert_used = 0
llm_load_print_meta: causal attn = 1
llm_load_print_meta: pooling type = 0
llm_load_print_meta: rope type = 0
llm_load_print_meta: rope scaling = linear
llm_load_print_meta: freq_base_train = 10000000.0
llm_load_print_meta: freq_scale_train = 1
llm_load_print_meta: n_ctx_orig_yarn = 131072
llm_load_print_meta: rope_finetuned = unknown
llm_load_print_meta: ssm_d_conv = 0
llm_load_print_meta: ssm_d_inner = 0
llm_load_print_meta: ssm_d_state = 0
llm_load_print_meta: ssm_dt_rank = 0
llm_load_print_meta: ssm_dt_b_c_rms = 0
llm_load_print_meta: model type = ?B
llm_load_print_meta: model ftype = Q8_0
llm_load_print_meta: model params = 1.48 B
llm_load_print_meta: model size = 1.46 GiB (8.50 BPW)
llm_load_print_meta: general.name = Yi Coder 1.5B
llm_load_print_meta: BOS token = 1 '<|startoftext|>'
llm_load_print_meta: EOS token = 2 '<|endoftext|>'
llm_load_print_meta: UNK token = 0 '<unk>'
llm_load_print_meta: PAD token = 0 '<unk>'
llm_load_print_meta: LF token = 315 '<0x0A>'
llm_load_print_meta: EOT token = 2 '<|endoftext|>'
llm_load_print_meta: max token length = 48
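As a sanity check on the reported model size, bits-per-weight (BPW) is total file size in bits divided by parameter count. A small sketch using the rounded figures printed above, so it only approximately reproduces the 8.50 BPW shown:

```python
# Rough check of "model size = 1.46 GiB (8.50 BPW)" using the rounded
# values printed in this log; exact byte and parameter counts would
# reproduce the reported 8.50 more closely.
params = 1.48e9                      # "model params = 1.48 B"
size_bytes = 1.46 * 1024**3          # "model size = 1.46 GiB"
bpw = size_bytes * 8 / params
print(f"{bpw:.2f} bits per weight")  # ~8.47 from rounded inputs
```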
ggml_cuda_init: GGML_CUDA_FORCE_MMQ: no
ggml_cuda_init: GGML_CUDA_FORCE_CUBLAS: no
ggml_cuda_init: found 1 CUDA devices:
Device 0: NVIDIA GeForce RTX 4090, compute capability 8.9, VMM: yes
llm_load_tensors: ggml ctx size = 0.20 MiB
llm_load_tensors: offloading 24 repeating layers to GPU
llm_load_tensors: offloading non-repeating layers to GPU
llm_load_tensors: offloaded 25/25 layers to GPU
llm_load_tensors: CPU buffer size = 132.81 MiB
llm_load_tensors: CUDA0 buffer size = 1363.58 MiB
.....................................................................................
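The CPU and CUDA0 weight buffers listed above should sum back to the 1.46 GiB total model size reported earlier; a quick check:

```python
# The model weights are split between host and GPU buffers; their sum
# should recover the 1.46 GiB total model size printed earlier.
cpu_mib, cuda_mib = 132.81, 1363.58             # buffer sizes from the log
print(f"{(cpu_mib + cuda_mib) / 1024:.2f} GiB")  # ~1.46 GiB
```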
llama_new_context_with_model: n_ctx = 512
llama_new_context_with_model: n_batch = 512
llama_new_context_with_model: n_ubatch = 512
llama_new_context_with_model: flash_attn = 0
llama_new_context_with_model: freq_base = 10000000.0
llama_new_context_with_model: freq_scale = 1
llama_kv_cache_init: CUDA0 KV buffer size = 96.00 MiB
llama_new_context_with_model: KV self size = 96.00 MiB, K (f16): 48.00 MiB, V (f16): 48.00 MiB
llama_new_context_with_model: CUDA_Host output buffer size = 0.24 MiB
llama_new_context_with_model: CUDA0 compute buffer size = 129.00 MiB
llama_new_context_with_model: CUDA_Host compute buffer size = 5.01 MiB
llama_new_context_with_model: graph nodes = 774
llama_new_context_with_model: graph splits = 2
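The 96 MiB KV cache follows from the dimensions printed above: an f16 key and value entry (2 bytes each) per token position, per layer, across the n_embd_k_gqa / n_embd_v_gqa width of 2048. A short sketch of that arithmetic:

```python
# f16 KV cache: one K and one V value (2 bytes each) per token position,
# per layer, across the 2048-wide K/V projection.
n_ctx, n_layer, n_embd_kv, bytes_f16 = 512, 24, 2048, 2
per_side = n_ctx * n_layer * n_embd_kv * bytes_f16  # K (or V) across all layers
print(per_side / 1024**2)       # 48.0 MiB each for K and V
print(2 * per_side / 1024**2)   # 96.0 MiB total, matching "KV self size"
```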

system_info: n_threads = 25 (n_threads_batch = 25) / 32 | AVX = 1 | AVX_VNNI = 0 | AVX2 = 1 | AVX512 = 1 | AVX512_VBMI = 1 | AVX512_VNNI = 1 | AVX512_BF16 = 1 | FMA = 1 | NEON = 0 | SVE = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 1 | SSSE3 = 1 | VSX = 0 | MATMUL_INT8 = 0 | LLAMAFILE = 1 |
compute_imatrix: tokenizing the input ..
compute_imatrix: tokenization took 96.062 ms
compute_imatrix: computing over 146 chunks with batch_size 512
compute_imatrix: 0.28 seconds per pass - ETA 0.68 minutes
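The chunk count and ETA above are consistent with each other and with the token total reported in the timings at the end of this log; a quick arithmetic check:

```python
# 146 chunks of 512 tokens is exactly the 74,752 tokens evaluated in the
# timings below, and the ETA is chunks times the measured seconds per pass.
chunks, n_ctx = 146, 512
print(chunks * n_ctx)        # 74752 tokens
print(chunks * 0.28 / 60)    # ~0.68 minutes, as reported
```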
[1]10.3261,[2]7.6669,[3]7.7806,[4]8.7211,[5]8.4687,[6]8.9730,[7]7.9033,[8]8.6189,[9]8.5616,
save_imatrix: stored collected data after 10 chunks in Yi-Coder-1.5B-IMat-GGUF/imatrix.dat
[10]9.2989,[11]9.2255,[12]8.3840,[13]8.8372,[14]9.6415,[15]9.8885,[16]10.3425,[17]10.7270,[18]10.8027,[19]10.9580,
save_imatrix: stored collected data after 20 chunks in Yi-Coder-1.5B-IMat-GGUF/imatrix.dat
[20]11.3447,[21]10.7243,[22]10.5280,[23]10.7871,[24]10.8674,[25]10.8901,[26]10.5022,[27]10.8212,[28]11.0584,[29]11.3613,
save_imatrix: stored collected data after 30 chunks in Yi-Coder-1.5B-IMat-GGUF/imatrix.dat
[30]11.4841,[31]11.8022,[32]12.1698,[33]12.1560,[34]12.0023,[35]11.6001,[36]10.9225,[37]10.3293,[38]10.3075,[39]10.2495,
save_imatrix: stored collected data after 40 chunks in Yi-Coder-1.5B-IMat-GGUF/imatrix.dat
[40]10.1666,[41]9.8749,[42]9.5947,[43]9.4123,[44]9.1555,[45]8.9784,[46]8.9266,[47]9.0523,[48]9.1878,[49]9.3432,
save_imatrix: stored collected data after 50 chunks in Yi-Coder-1.5B-IMat-GGUF/imatrix.dat
[50]9.5712,[51]9.9950,[52]10.3798,[53]10.6499,[54]10.8651,[55]10.9412,[56]10.8844,[57]11.0608,[58]11.1464,[59]11.2613,
save_imatrix: stored collected data after 60 chunks in Yi-Coder-1.5B-IMat-GGUF/imatrix.dat
[60]11.1391,[61]11.0098,[62]11.0310,[63]11.2092,[64]11.3302,[65]11.4643,[66]11.5648,[67]11.6410,[68]11.7042,[69]11.7550,
save_imatrix: stored collected data after 70 chunks in Yi-Coder-1.5B-IMat-GGUF/imatrix.dat
[70]11.5773,[71]11.4714,[72]11.3477,[73]11.2543,[74]11.2936,[75]11.3877,[76]11.3775,[77]11.3895,[78]11.3749,[79]11.3382,
save_imatrix: stored collected data after 80 chunks in Yi-Coder-1.5B-IMat-GGUF/imatrix.dat
[80]11.2772,[81]11.1772,[82]11.1956,[83]11.1453,[84]11.0887,[85]11.0973,[86]11.0338,[87]10.9648,[88]10.9111,[89]10.9139,
save_imatrix: stored collected data after 90 chunks in Yi-Coder-1.5B-IMat-GGUF/imatrix.dat
[90]10.8585,[91]10.8284,[92]10.7128,[93]10.6836,[94]10.7654,[95]10.7780,[96]10.7185,[97]10.7426,[98]10.7601,[99]10.8152,
save_imatrix: stored collected data after 100 chunks in Yi-Coder-1.5B-IMat-GGUF/imatrix.dat
[100]10.6570,[101]10.7233,[102]10.7604,[103]10.7894,[104]10.8055,[105]10.8313,[106]10.7307,[107]10.6429,[108]10.5462,[109]10.4416,
save_imatrix: stored collected data after 110 chunks in Yi-Coder-1.5B-IMat-GGUF/imatrix.dat
[110]10.3491,[111]10.2626,[112]10.1761,[113]10.0891,[114]10.0589,[115]10.0787,[116]10.1276,[117]10.2350,[118]10.3384,[119]10.4326,
save_imatrix: stored collected data after 120 chunks in Yi-Coder-1.5B-IMat-GGUF/imatrix.dat
[120]10.5822,[121]10.6911,[122]10.7156,[123]10.7286,[124]10.6617,[125]10.6744,[126]10.6471,[127]10.5930,[128]10.5230,[129]10.5357,
save_imatrix: stored collected data after 130 chunks in Yi-Coder-1.5B-IMat-GGUF/imatrix.dat
[130]10.6002,[131]10.6054,[132]10.6551,[133]10.6937,[134]10.7523,[135]10.7860,[136]10.7983,[137]10.8177,[138]10.8081,[139]10.7765,
save_imatrix: stored collected data after 140 chunks in Yi-Coder-1.5B-IMat-GGUF/imatrix.dat
[140]10.8588,[141]10.9447,[142]11.0363,[143]11.1297,[144]11.2265,[145]11.3204,[146]11.3915,
save_imatrix: stored collected data after 146 chunks in Yi-Coder-1.5B-IMat-GGUF/imatrix.dat

llama_print_timings: load time = 778.91 ms
llama_print_timings: sample time = 0.00 ms / 1 runs ( 0.00 ms per token, inf tokens per second)
llama_print_timings: prompt eval time = 27576.74 ms / 74752 tokens ( 0.37 ms per token, 2710.69 tokens per second)
llama_print_timings: eval time = 0.00 ms / 1 runs ( 0.00 ms per token, inf tokens per second)
llama_print_timings: total time = 28614.33 ms / 74753 tokens
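The prompt-eval throughput reported above is simply token count over elapsed wall-clock time; a quick check:

```python
# Prompt-eval throughput: tokens divided by elapsed seconds.
tokens, elapsed_ms = 74752, 27576.74
print(tokens / (elapsed_ms / 1000))  # ~2710.7 tokens per second
print(elapsed_ms / tokens)           # ~0.37 ms per token
```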

Final estimate: PPL = 11.3915 +/- 0.16460
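For reference, the bracketed running values and this final figure are perplexities in the usual sense, the exponential of the mean negative log-likelihood per token, with the +/- term reporting an estimated standard error. A minimal sketch of that definition, using toy probabilities rather than values from this run:

```python
import math

# Perplexity over per-token probabilities p_i: PPL = exp(-(1/N) * sum(log p_i)).
# The running values in this log are the same quantity over the tokens seen so far.
def perplexity(token_probs):
    nll = -sum(math.log(p) for p in token_probs) / len(token_probs)
    return math.exp(nll)

print(perplexity([0.25, 0.5, 0.125]))  # toy example, not data from this run
```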