|
llama_model_loader: loaded meta data with 32 key-value pairs and 723 tensors from Palmyra-Fin-70B-32K-IMat-GGUF/Palmyra-Fin-70B-32K.Q8_0.gguf.hardlink.gguf (version GGUF V3 (latest))
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv 0: general.architecture str = llama
llama_model_loader: - kv 1: general.type str = model
llama_model_loader: - kv 2: general.name str = Palmyra Fin 70B 32K
llama_model_loader: - kv 3: general.organization str = Writer
llama_model_loader: - kv 4: general.finetune str = 32k
llama_model_loader: - kv 5: general.basename str = Palmyra-Fin
llama_model_loader: - kv 6: general.size_label str = 70B
llama_model_loader: - kv 7: general.license str = other
llama_model_loader: - kv 8: general.license.name str = writer-open-model-license
llama_model_loader: - kv 9: general.license.link str = https://writer.com/legal/open-model-l...
llama_model_loader: - kv 10: general.tags arr[str,14] = ["instruct", "pytorch", "finance", "s...
llama_model_loader: - kv 11: general.languages arr[str,1] = ["en"]
llama_model_loader: - kv 12: llama.block_count u32 = 80
llama_model_loader: - kv 13: llama.context_length u32 = 32768
llama_model_loader: - kv 14: llama.embedding_length u32 = 8192
llama_model_loader: - kv 15: llama.feed_forward_length u32 = 28672
llama_model_loader: - kv 16: llama.attention.head_count u32 = 64
llama_model_loader: - kv 17: llama.attention.head_count_kv u32 = 8
llama_model_loader: - kv 18: llama.rope.freq_base f32 = 6315088.000000
llama_model_loader: - kv 19: llama.attention.layer_norm_rms_epsilon f32 = 0.000010
llama_model_loader: - kv 20: general.file_type u32 = 7
llama_model_loader: - kv 21: llama.vocab_size u32 = 128256
llama_model_loader: - kv 22: llama.rope.dimension_count u32 = 128
llama_model_loader: - kv 23: tokenizer.ggml.model str = gpt2
llama_model_loader: - kv 24: tokenizer.ggml.pre str = smaug-bpe
llama_model_loader: - kv 25: tokenizer.ggml.tokens arr[str,128256] = ["!", "\"", "#", "$", "%", "&", "'", ...
llama_model_loader: - kv 26: tokenizer.ggml.token_type arr[i32,128256] = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...
llama_model_loader: - kv 27: tokenizer.ggml.merges arr[str,280147] = ["Ġ Ġ", "Ġ ĠĠĠ", "ĠĠ ĠĠ", "...
llama_model_loader: - kv 28: tokenizer.ggml.bos_token_id u32 = 128000
llama_model_loader: - kv 29: tokenizer.ggml.eos_token_id u32 = 128009
llama_model_loader: - kv 30: tokenizer.chat_template str = {% set loop_messages = messages %}{% ...
llama_model_loader: - kv 31: general.quantization_version u32 = 2
llama_model_loader: - type f32: 161 tensors
llama_model_loader: - type q8_0: 562 tensors
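The dumped key-value pairs can be read back offline with the gguf Python package published from the llama.cpp tree; a minimal sketch (treat the API details as assumptions about a recent gguf release, and the shortened filename as illustrative):

```python
# Minimal sketch: list GGUF metadata, assuming the `gguf` Python package
# from llama.cpp (pip install gguf). Filename shortened for illustration.
from gguf import GGUFReader

reader = GGUFReader("Palmyra-Fin-70B-32K.Q8_0.gguf")
print(len(reader.fields), "kv pairs,", len(reader.tensors), "tensors")

# Each field corresponds to one "kv" line in the dump above.
for field in reader.fields.values():
    print(field.name, [t.name for t in field.types])
```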
|
llm_load_vocab: special tokens cache size = 256
llm_load_vocab: token to piece cache size = 0.8000 MB
|
llm_load_print_meta: format = GGUF V3 (latest)
llm_load_print_meta: arch = llama
llm_load_print_meta: vocab type = BPE
llm_load_print_meta: n_vocab = 128256
llm_load_print_meta: n_merges = 280147
llm_load_print_meta: vocab_only = 0
llm_load_print_meta: n_ctx_train = 32768
llm_load_print_meta: n_embd = 8192
llm_load_print_meta: n_layer = 80
llm_load_print_meta: n_head = 64
llm_load_print_meta: n_head_kv = 8
llm_load_print_meta: n_rot = 128
llm_load_print_meta: n_swa = 0
llm_load_print_meta: n_embd_head_k = 128
llm_load_print_meta: n_embd_head_v = 128
llm_load_print_meta: n_gqa = 8
llm_load_print_meta: n_embd_k_gqa = 1024
llm_load_print_meta: n_embd_v_gqa = 1024
llm_load_print_meta: f_norm_eps = 0.0e+00
llm_load_print_meta: f_norm_rms_eps = 1.0e-05
llm_load_print_meta: f_clamp_kqv = 0.0e+00
llm_load_print_meta: f_max_alibi_bias = 0.0e+00
llm_load_print_meta: f_logit_scale = 0.0e+00
llm_load_print_meta: n_ff = 28672
llm_load_print_meta: n_expert = 0
llm_load_print_meta: n_expert_used = 0
llm_load_print_meta: causal attn = 1
llm_load_print_meta: pooling type = 0
llm_load_print_meta: rope type = 0
llm_load_print_meta: rope scaling = linear
llm_load_print_meta: freq_base_train = 6315088.0
llm_load_print_meta: freq_scale_train = 1
llm_load_print_meta: n_ctx_orig_yarn = 32768
llm_load_print_meta: rope_finetuned = unknown
llm_load_print_meta: ssm_d_conv = 0
llm_load_print_meta: ssm_d_inner = 0
llm_load_print_meta: ssm_d_state = 0
llm_load_print_meta: ssm_dt_rank = 0
llm_load_print_meta: model type = 70B
llm_load_print_meta: model ftype = Q8_0
llm_load_print_meta: model params = 70.55 B
llm_load_print_meta: model size = 69.82 GiB (8.50 BPW)
llm_load_print_meta: general.name = Palmyra Fin 70B 32K
llm_load_print_meta: BOS token = 128000 '<|begin_of_text|>'
llm_load_print_meta: EOS token = 128009 '<|eot_id|>'
llm_load_print_meta: LF token = 128 'Ä'
llm_load_print_meta: EOT token = 128009 '<|eot_id|>'
llm_load_print_meta: max token length = 256
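Several of the derived values above follow directly from the header fields, as does the 8.50 BPW figure; a quick arithmetic check:

```python
# Sanity-check the derived dimensions printed by llm_load_print_meta.
n_embd, n_head, n_head_kv = 8192, 64, 8

n_embd_head = n_embd // n_head       # 128, matches n_embd_head_k/v and n_rot
n_gqa = n_head // n_head_kv          # 8, the grouped-query attention factor
n_embd_kv = n_head_kv * n_embd_head  # 1024, matches n_embd_k_gqa/v_gqa

# Bits per weight of the Q8_0 file: total bits / parameter count.
size_gib, params = 69.82, 70.55e9
bpw = size_gib * 2**30 * 8 / params  # ~8.50 BPW, as reported
print(n_embd_head, n_gqa, n_embd_kv, round(bpw, 2))
```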
|
ggml_cuda_init: GGML_CUDA_FORCE_MMQ: no
ggml_cuda_init: GGML_CUDA_FORCE_CUBLAS: no
ggml_cuda_init: found 1 CUDA devices:
Device 0: NVIDIA GeForce RTX 4090, compute capability 8.9, VMM: yes
|
llm_load_tensors: ggml ctx size = 0.68 MiB
llm_load_tensors: offloading 25 repeating layers to GPU
llm_load_tensors: offloaded 25/81 layers to GPU
llm_load_tensors: CPU buffer size = 71494.28 MiB
llm_load_tensors: CUDA0 buffer size = 21676.56 MiB
....................................................................................................
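The CUDA0 buffer implies roughly 867 MiB per offloaded layer (21676.56 MiB / 25), which gives a rule of thumb for choosing -ngl on other cards; a rough sketch, where the 24 GiB budget is an assumed RTX 4090 figure and real headroom must also cover the KV cache and compute buffers:

```python
# Rule-of-thumb -ngl estimate from the buffer sizes logged above.
cuda_buffer_mib, offloaded = 21676.56, 25
per_layer_mib = cuda_buffer_mib / offloaded  # ~867 MiB per layer at Q8_0

vram_budget_mib = 24 * 1024                  # assumed 24 GiB card; leave headroom
max_layers = int(vram_budget_mib // per_layer_mib)  # ~28, before KV/compute buffers
print(round(per_layer_mib, 2), max_layers)
```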
|
llama_new_context_with_model: n_ctx = 512
llama_new_context_with_model: n_batch = 512
llama_new_context_with_model: n_ubatch = 512
llama_new_context_with_model: flash_attn = 0
llama_new_context_with_model: freq_base = 6315088.0
llama_new_context_with_model: freq_scale = 1
llama_kv_cache_init: CUDA_Host KV buffer size = 110.00 MiB
llama_kv_cache_init: CUDA0 KV buffer size = 50.00 MiB
llama_new_context_with_model: KV self size = 160.00 MiB, K (f16): 80.00 MiB, V (f16): 80.00 MiB
llama_new_context_with_model: CUDA_Host output buffer size = 0.49 MiB
llama_new_context_with_model: CUDA0 compute buffer size = 1331.12 MiB
llama_new_context_with_model: CUDA_Host compute buffer size = 17.01 MiB
llama_new_context_with_model: graph nodes = 2566
llama_new_context_with_model: graph splits = 609
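The 160 MiB KV self size follows exactly from the dimensions printed earlier, and the 110/50 MiB host/GPU split matches the 55 CPU-resident versus 25 offloaded repeating layers:

```python
# Reproduce the reported KV cache sizes from the logged dimensions.
n_layer, n_ctx, n_embd_kv_gqa, f16_bytes = 80, 512, 1024, 2

kv_mib = 2 * n_layer * n_ctx * n_embd_kv_gqa * f16_bytes / 2**20  # K and V
print(kv_mib)            # 160.0 MiB total (80 MiB K + 80 MiB V)
print(kv_mib * 55 / 80)  # 110.0 MiB on CUDA_Host (55 CPU layers)
print(kv_mib * 25 / 80)  # 50.0 MiB on CUDA0 (25 offloaded layers)
```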
|
system_info: n_threads = 25 / 32 | AVX = 1 | AVX_VNNI = 0 | AVX2 = 1 | AVX512 = 1 | AVX512_VBMI = 1 | AVX512_VNNI = 1 | AVX512_BF16 = 1 | FMA = 1 | NEON = 0 | SVE = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 1 | SSSE3 = 1 | VSX = 0 | MATMUL_INT8 = 0 | LLAMAFILE = 1 |
|
compute_imatrix: tokenizing the input ..
compute_imatrix: tokenization took 126.907 ms
compute_imatrix: computing over 125 chunks with batch_size 512
compute_imatrix: 6.04 seconds per pass - ETA 12.58 minutes
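The schedule is self-consistent: 125 chunks of batch_size 512 is the 64,000 tokens reported in the timings at the end, and 125 passes at 6.04 s each gives the 12.58-minute ETA:

```python
# Check the compute_imatrix schedule against the reported numbers.
chunks, batch_size, sec_per_pass = 125, 512, 6.04

print(chunks * batch_size)                   # 64000 tokens, matches llama_print_timings
print(round(chunks * sec_per_pass / 60, 2))  # 12.58 minutes, matches the ETA
```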
|
[1]6.1440,[2]4.7452,[3]4.1216,[4]4.9314,[5]5.0081,[6]4.2135,[7]4.2735,[8]4.6681,[9]4.8739,
save_imatrix: stored collected data after 10 chunks in Palmyra-Fin-70B-32K-IMat-GGUF/imatrix.dat
[10]4.5858,[11]5.0261,[12]5.4656,[13]5.9165,[14]6.2804,[15]6.4814,[16]6.7611,[17]6.9371,[18]6.6649,[19]6.3427,
save_imatrix: stored collected data after 20 chunks in Palmyra-Fin-70B-32K-IMat-GGUF/imatrix.dat
[20]6.3391,[21]6.4281,[22]6.4250,[23]6.6474,[24]6.6518,[25]6.9319,[26]6.9256,[27]6.5617,[28]6.2843,[29]6.2872,
save_imatrix: stored collected data after 30 chunks in Palmyra-Fin-70B-32K-IMat-GGUF/imatrix.dat
[30]6.2545,[31]5.9800,[32]5.7160,[33]5.6005,[34]5.5069,[35]5.5871,[36]5.6482,[37]5.6162,[38]5.6794,[39]5.8489,
save_imatrix: stored collected data after 40 chunks in Palmyra-Fin-70B-32K-IMat-GGUF/imatrix.dat
[40]5.9275,[41]5.7433,[42]5.5645,[43]5.4150,[44]5.2665,[45]5.2281,[46]5.2048,[47]5.3152,[48]5.4025,[49]5.5079,
save_imatrix: stored collected data after 50 chunks in Palmyra-Fin-70B-32K-IMat-GGUF/imatrix.dat
[50]5.4590,[51]5.5551,[52]5.6457,[53]5.7308,[54]5.7917,[55]5.8764,[56]5.9336,[57]6.0021,[58]6.0446,[59]6.0770,
save_imatrix: stored collected data after 60 chunks in Palmyra-Fin-70B-32K-IMat-GGUF/imatrix.dat
[60]6.0581,[61]6.0574,[62]6.1048,[63]6.1625,[64]6.1075,[65]6.0976,[66]6.1092,[67]6.0971,[68]6.1110,[69]6.1082,
save_imatrix: stored collected data after 70 chunks in Palmyra-Fin-70B-32K-IMat-GGUF/imatrix.dat
[70]6.1211,[71]6.1250,[72]6.1348,[73]6.1226,[74]6.0961,[75]6.0962,[76]6.1081,[77]6.0929,[78]6.0968,[79]6.1313,
save_imatrix: stored collected data after 80 chunks in Palmyra-Fin-70B-32K-IMat-GGUF/imatrix.dat
[80]6.1531,[81]6.1465,[82]6.1606,[83]6.1917,[84]6.1254,[85]6.1244,[86]6.1312,[87]6.1491,[88]6.1875,[89]6.2506,
save_imatrix: stored collected data after 90 chunks in Palmyra-Fin-70B-32K-IMat-GGUF/imatrix.dat
[90]6.2899,[91]6.3202,[92]6.3413,[93]6.3615,[94]6.3904,[95]6.4258,[96]6.3927,[97]6.4068,[98]6.4536,[99]6.5224,
save_imatrix: stored collected data after 100 chunks in Palmyra-Fin-70B-32K-IMat-GGUF/imatrix.dat
[100]6.5836,[101]6.6276,[102]6.7285,[103]6.7596,[104]6.7928,[105]6.7456,[106]6.7562,[107]6.7182,[108]6.6500,[109]6.5811,
save_imatrix: stored collected data after 110 chunks in Palmyra-Fin-70B-32K-IMat-GGUF/imatrix.dat
[110]6.6171,[111]6.6546,[112]6.6663,[113]6.6677,[114]6.6948,[115]6.7273,[116]6.7414,[117]6.7599,[118]6.7975,[119]6.7594,
save_imatrix: stored collected data after 120 chunks in Palmyra-Fin-70B-32K-IMat-GGUF/imatrix.dat
[120]6.6840,[121]6.6151,[122]6.5444,[123]6.4784,[124]6.4269,[125]6.3698,
save_imatrix: stored collected data after 125 chunks in Palmyra-Fin-70B-32K-IMat-GGUF/imatrix.dat
|
llama_print_timings: load time = 32479.28 ms
llama_print_timings: sample time = 0.00 ms / 1 runs ( 0.00 ms per token, inf tokens per second)
llama_print_timings: prompt eval time = 707989.47 ms / 64000 tokens ( 11.06 ms per token, 90.40 tokens per second)
llama_print_timings: eval time = 0.00 ms / 1 runs ( 0.00 ms per token, inf tokens per second)
llama_print_timings: total time = 735504.21 ms / 64001 tokens
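The prompt-eval line checks out against the raw timing: 64,000 tokens in 707,989.47 ms is 11.06 ms per token, or 90.40 tokens per second:

```python
# Verify the prompt-eval throughput from the raw timing.
prompt_ms, tokens = 707989.47, 64000

print(round(prompt_ms / tokens, 2))           # 11.06 ms per token
print(round(tokens / (prompt_ms / 1000), 2))  # 90.40 tokens per second
```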
|
Final estimate: PPL = 6.3698 +/- 0.08949
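The bracketed values in the chunk stream are the running perplexity, exp of the mean per-token negative log-likelihood over all chunks seen so far, which is why the final estimate equals the last entry, [125] 6.3698; the +/- term is an uncertainty on that mean. An illustrative aggregation, not llama.cpp's exact code:

```python
import math

# Illustrative running-perplexity aggregation over per-token NLLs,
# mimicking the "[n]x.xxxx," stream printed above.
def running_ppl(chunk_nlls):
    """chunk_nlls: iterable of lists of per-token negative log-likelihoods."""
    total_nll, total_tokens = 0.0, 0
    for i, nlls in enumerate(chunk_nlls, start=1):
        total_nll += sum(nlls)
        total_tokens += len(nlls)
        # Running PPL = exp(mean NLL over every token seen so far).
        print(f"[{i}]{math.exp(total_nll / total_tokens):.4f}", end=",")
    print()
```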