legraphista committed
Upload imatrix.log with huggingface_hub
imatrix.log +36 -34
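For reference, an upload like the one in this commit can be scripted with the huggingface_hub client library. Below is a minimal sketch, assuming the repo id legraphista/gemma-2-27b-it-IMat-GGUF (inferred from the paths in the diff) and an environment already authenticated via `huggingface-cli login`:

# Minimal sketch: push imatrix.log to a model repo with huggingface_hub.
# The repo_id is an assumption inferred from the paths in the diff below.
from huggingface_hub import HfApi

api = HfApi()  # picks up the token stored by `huggingface-cli login`
api.upload_file(
    path_or_fileobj="imatrix.log",                   # local log file to upload
    path_in_repo="imatrix.log",                      # destination path in the repo
    repo_id="legraphista/gemma-2-27b-it-IMat-GGUF",  # assumed repo id
    commit_message="Upload imatrix.log with huggingface_hub",
)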
imatrix.log CHANGED
@@ -1,4 +1,4 @@
-llama_model_loader: loaded meta data with
+llama_model_loader: loaded meta data with 28 key-value pairs and 508 tensors from gemma-2-27b-it-IMat-GGUF/gemma-2-27b-it.Q8_0.gguf.hardlink.gguf (version GGUF V3 (latest))
 llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
 llama_model_loader: - kv 0: general.architecture str = gemma2
 llama_model_loader: - kv 1: general.name str = gemma-2-27b-it
@@ -12,20 +12,22 @@ llama_model_loader: - kv 8: gemma2.attention.layer_norm_rms_epsilon f32
 llama_model_loader: - kv 9: gemma2.attention.key_length u32 = 128
 llama_model_loader: - kv 10: gemma2.attention.value_length u32 = 128
 llama_model_loader: - kv 11: general.file_type u32 = 7
-llama_model_loader: - kv 12:
-llama_model_loader: - kv 13:
-llama_model_loader: - kv 14:
-llama_model_loader: - kv 15:
-llama_model_loader: - kv 16:
-llama_model_loader: - kv 17:
-llama_model_loader: - kv 18:
-llama_model_loader: - kv 19:
-llama_model_loader: - kv 20:
-llama_model_loader: - kv 21:
-llama_model_loader: - kv 22:
-llama_model_loader: - kv 23:
-llama_model_loader: - kv 24:
-llama_model_loader: - kv 25:
+llama_model_loader: - kv 12: gemma2.attn_logit_softcapping f32 = 50.000000
+llama_model_loader: - kv 13: gemma2.final_logit_softcapping f32 = 30.000000
+llama_model_loader: - kv 14: tokenizer.ggml.model str = llama
+llama_model_loader: - kv 15: tokenizer.ggml.pre str = default
+llama_model_loader: - kv 16: tokenizer.ggml.tokens arr[str,256000] = ["<pad>", "<eos>", "<bos>", "<unk>", ...
+llama_model_loader: - kv 17: tokenizer.ggml.scores arr[f32,256000] = [-1000.000000, -1000.000000, -1000.00...
+llama_model_loader: - kv 18: tokenizer.ggml.token_type arr[i32,256000] = [3, 3, 3, 3, 1, 1, 1, 1, 1, 1, 1, 1, ...
+llama_model_loader: - kv 19: tokenizer.ggml.bos_token_id u32 = 2
+llama_model_loader: - kv 20: tokenizer.ggml.eos_token_id u32 = 1
+llama_model_loader: - kv 21: tokenizer.ggml.unknown_token_id u32 = 3
+llama_model_loader: - kv 22: tokenizer.ggml.padding_token_id u32 = 0
+llama_model_loader: - kv 23: tokenizer.ggml.add_bos_token bool = true
+llama_model_loader: - kv 24: tokenizer.ggml.add_eos_token bool = false
+llama_model_loader: - kv 25: tokenizer.chat_template str = {{ bos_token }}{% if messages[0]['rol...
+llama_model_loader: - kv 26: tokenizer.ggml.add_space_prefix bool = false
+llama_model_loader: - kv 27: general.quantization_version u32 = 2
 llama_model_loader: - type f32: 185 tensors
 llama_model_loader: - type q8_0: 323 tensors
 llm_load_vocab: special tokens cache size = 261
@@ -100,45 +102,45 @@ llama_new_context_with_model: KV self size = 184.00 MiB, K (f16): 92.00 MiB,
 llama_new_context_with_model: CUDA_Host output buffer size = 0.98 MiB
 llama_new_context_with_model: CUDA0 compute buffer size = 1704.31 MiB
 llama_new_context_with_model: CUDA_Host compute buffer size = 10.01 MiB
-llama_new_context_with_model: graph nodes =
+llama_new_context_with_model: graph nodes = 1850
 llama_new_context_with_model: graph splits = 121
 
 system_info: n_threads = 25 / 32 | AVX = 1 | AVX_VNNI = 0 | AVX2 = 1 | AVX512 = 1 | AVX512_VBMI = 1 | AVX512_VNNI = 1 | AVX512_BF16 = 1 | FMA = 1 | NEON = 0 | SVE = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 1 | SSSE3 = 1 | VSX = 0 | MATMUL_INT8 = 0 | LLAMAFILE = 1 |
 compute_imatrix: tokenizing the input ..
-compute_imatrix: tokenization took
+compute_imatrix: tokenization took 94.256 ms
 compute_imatrix: computing over 128 chunks with batch_size 512
 compute_imatrix: 1.92 seconds per pass - ETA 4.08 minutes
-[1]
+[1]12.2429,[2]6.2081,[3]5.2588,[4]6.2085,[5]6.7166,[6]7.2390,[7]7.6746,[8]8.1610,[9]8.5380,
 save_imatrix: stored collected data after 10 chunks in gemma-2-27b-it-IMat-GGUF/imatrix.dat
-[10]
+[10]7.7409,[11]7.6231,[12]8.3069,[13]8.8175,[14]8.9820,[15]9.5963,[16]9.7448,[17]9.8649,[18]10.2368,[19]10.1102,
 save_imatrix: stored collected data after 20 chunks in gemma-2-27b-it-IMat-GGUF/imatrix.dat
-[20]
+[20]10.2363,[21]10.9791,[22]10.9424,[23]10.8230,[24]11.0184,[25]10.9711,[26]10.8238,[27]11.0156,[28]11.1952,[29]11.2073,
 save_imatrix: stored collected data after 30 chunks in gemma-2-27b-it-IMat-GGUF/imatrix.dat
-[30]
+[30]11.4849,[31]10.7086,[32]10.2419,[33]9.8871,[34]9.6029,[35]9.3962,[36]9.5205,[37]9.7559,[38]9.8816,[39]10.0292,
 save_imatrix: stored collected data after 40 chunks in gemma-2-27b-it-IMat-GGUF/imatrix.dat
-[40]
+[40]10.1462,[41]10.2011,[42]10.6384,[43]10.9116,[44]11.2397,[45]11.4389,[46]11.2550,[47]11.0805,[48]11.2637,[49]11.4301,
 save_imatrix: stored collected data after 50 chunks in gemma-2-27b-it-IMat-GGUF/imatrix.dat
-[50]
+[50]11.2782,[51]11.1894,[52]11.2327,[53]11.4036,[54]11.5967,[55]11.8117,[56]11.9157,[57]11.9185,[58]11.9380,[59]11.7810,
 save_imatrix: stored collected data after 60 chunks in gemma-2-27b-it-IMat-GGUF/imatrix.dat
-[60]
+[60]11.6650,[61]11.5287,[62]11.4071,[63]11.4760,[64]11.5875,[65]11.4612,[66]11.4694,[67]11.4358,[68]11.4182,[69]11.3768,
 save_imatrix: stored collected data after 70 chunks in gemma-2-27b-it-IMat-GGUF/imatrix.dat
-[70]
+[70]11.3167,[71]11.3025,[72]11.2862,[73]11.3380,[74]11.2888,[75]11.1897,[76]11.1629,[77]11.1599,[78]11.1381,[79]11.0667,
 save_imatrix: stored collected data after 80 chunks in gemma-2-27b-it-IMat-GGUF/imatrix.dat
-[80]
+[80]11.1144,[81]11.1690,[82]11.1841,[83]11.2762,[84]11.2937,[85]11.1199,[86]11.0622,[87]10.9503,[88]10.9787,[89]10.9774,
 save_imatrix: stored collected data after 90 chunks in gemma-2-27b-it-IMat-GGUF/imatrix.dat
-[90]
+[90]11.0439,[91]11.0167,[92]10.9757,[93]10.9302,[94]10.8609,[95]10.8306,[96]10.7705,[97]10.7326,[98]10.6794,[99]10.7099,
 save_imatrix: stored collected data after 100 chunks in gemma-2-27b-it-IMat-GGUF/imatrix.dat
-[100]
+[100]10.7050,[101]10.8097,[102]10.8830,[103]10.9458,[104]11.0763,[105]11.1774,[106]11.1839,[107]11.1889,[108]11.1470,[109]11.1662,
 save_imatrix: stored collected data after 110 chunks in gemma-2-27b-it-IMat-GGUF/imatrix.dat
-[110]
+[110]11.0690,[111]10.9087,[112]10.7355,[113]10.7965,[114]10.8346,[115]10.8238,[116]10.8008,[117]10.8403,[118]10.8670,[119]10.8845,
 save_imatrix: stored collected data after 120 chunks in gemma-2-27b-it-IMat-GGUF/imatrix.dat
-[120]
+[120]10.8804,[121]10.8749,[122]10.8379,[123]10.8537,[124]10.9275,[125]11.0106,[126]11.0989,[127]11.1369,[128]11.1788,
 save_imatrix: stored collected data after 128 chunks in gemma-2-27b-it-IMat-GGUF/imatrix.dat
 
-llama_print_timings: load time =
+llama_print_timings: load time = 4035.26 ms
 llama_print_timings: sample time = 0.00 ms / 1 runs ( 0.00 ms per token, inf tokens per second)
-llama_print_timings: prompt eval time =
+llama_print_timings: prompt eval time = 226486.47 ms / 65536 tokens ( 3.46 ms per token, 289.36 tokens per second)
 llama_print_timings: eval time = 0.00 ms / 1 runs ( 0.00 ms per token, inf tokens per second)
-llama_print_timings: total time =
+llama_print_timings: total time = 230157.77 ms / 65537 tokens
 
-Final estimate: PPL =
+Final estimate: PPL = 11.1788 +/- 0.20186