build: 3906 (7eee341b) with Apple clang version 15.0.0 (clang-1500.3.9.4) for arm64-apple-darwin23.6.0
llama_model_loader: loaded meta data with 33 key-value pairs and 219 tensors from salamandra-2b_Q6_K.gguf (version GGUF V3 (latest))
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv 0: general.architecture str = llama
llama_model_loader: - kv 1: general.type str = model
llama_model_loader: - kv 2: general.size_label str = 2.3B
llama_model_loader: - kv 3: general.license str = apache-2.0
llama_model_loader: - kv 4: general.tags arr[str,1] = ["text-generation"]
llama_model_loader: - kv 5: general.languages arr[str,36] = ["bg", "ca", "code", "cs", "cy", "da"...
llama_model_loader: - kv 6: llama.block_count u32 = 24
llama_model_loader: - kv 7: llama.context_length u32 = 8192
llama_model_loader: - kv 8: llama.embedding_length u32 = 2048
llama_model_loader: - kv 9: llama.feed_forward_length u32 = 5440
llama_model_loader: - kv 10: llama.attention.head_count u32 = 16
llama_model_loader: - kv 11: llama.attention.head_count_kv u32 = 16
llama_model_loader: - kv 12: llama.rope.freq_base f32 = 10000.000000
llama_model_loader: - kv 13: llama.attention.layer_norm_rms_epsilon f32 = 0.000010
llama_model_loader: - kv 14: general.file_type u32 = 18
llama_model_loader: - kv 15: llama.vocab_size u32 = 256000
llama_model_loader: - kv 16: llama.rope.dimension_count u32 = 128
llama_model_loader: - kv 17: tokenizer.ggml.add_space_prefix bool = true
llama_model_loader: - kv 18: tokenizer.ggml.model str = llama
llama_model_loader: - kv 19: tokenizer.ggml.pre str = default
llama_model_loader: - kv 20: tokenizer.ggml.tokens arr[str,256000] = ["<unk>", "<s>", "</s>", "<pad>", "<|...
llama_model_loader: - kv 21: tokenizer.ggml.scores arr[f32,256000] = [-1000.000000, -1000.000000, -1000.00...
llama_model_loader: - kv 22: tokenizer.ggml.token_type arr[i32,256000] = [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, ...
llama_model_loader: - kv 23: tokenizer.ggml.bos_token_id u32 = 1
llama_model_loader: - kv 24: tokenizer.ggml.eos_token_id u32 = 2
llama_model_loader: - kv 25: tokenizer.ggml.unknown_token_id u32 = 0
llama_model_loader: - kv 26: tokenizer.ggml.add_bos_token bool = true
llama_model_loader: - kv 27: tokenizer.ggml.add_eos_token bool = false
llama_model_loader: - kv 28: general.quantization_version u32 = 2
llama_model_loader: - kv 29: quantize.imatrix.file str = imatrix/oscar/imatrix.dat
llama_model_loader: - kv 30: quantize.imatrix.dataset str = ./imatrix/oscar/imatrix-dataset.txt
llama_model_loader: - kv 31: quantize.imatrix.entries_count i32 = 168
llama_model_loader: - kv 32: quantize.imatrix.chunks_count i32 = 44176
llama_model_loader: - type f32: 49 tensors
llama_model_loader: - type q8_0: 24 tensors
llama_model_loader: - type q6_K: 145 tensors
llama_model_loader: - type bf16: 1 tensors
llm_load_vocab: special_eos_id is not in special_eog_ids - the tokenizer config may be incorrect
llm_load_vocab: special tokens cache size = 104
llm_load_vocab: token to piece cache size = 1.8842 MB
llm_load_print_meta: format = GGUF V3 (latest)
llm_load_print_meta: arch = llama
llm_load_print_meta: vocab type = SPM
llm_load_print_meta: n_vocab = 256000
llm_load_print_meta: n_merges = 0
llm_load_print_meta: vocab_only = 0
llm_load_print_meta: n_ctx_train = 8192
llm_load_print_meta: n_embd = 2048
llm_load_print_meta: n_layer = 24
llm_load_print_meta: n_head = 16
llm_load_print_meta: n_head_kv = 16
llm_load_print_meta: n_rot = 128
llm_load_print_meta: n_swa = 0
llm_load_print_meta: n_embd_head_k = 128
llm_load_print_meta: n_embd_head_v = 128
llm_load_print_meta: n_gqa = 1
llm_load_print_meta: n_embd_k_gqa = 2048
llm_load_print_meta: n_embd_v_gqa = 2048
llm_load_print_meta: f_norm_eps = 0.0e+00
llm_load_print_meta: f_norm_rms_eps = 1.0e-05
llm_load_print_meta: f_clamp_kqv = 0.0e+00
llm_load_print_meta: f_max_alibi_bias = 0.0e+00
llm_load_print_meta: f_logit_scale = 0.0e+00
llm_load_print_meta: n_ff = 5440
llm_load_print_meta: n_expert = 0
llm_load_print_meta: n_expert_used = 0
llm_load_print_meta: causal attn = 1
llm_load_print_meta: pooling type = 0
llm_load_print_meta: rope type = 0
llm_load_print_meta: rope scaling = linear
llm_load_print_meta: freq_base_train = 10000.0
llm_load_print_meta: freq_scale_train = 1
llm_load_print_meta: n_ctx_orig_yarn = 8192
llm_load_print_meta: rope_finetuned = unknown
llm_load_print_meta: ssm_d_conv = 0
llm_load_print_meta: ssm_d_inner = 0
llm_load_print_meta: ssm_d_state = 0
llm_load_print_meta: ssm_dt_rank = 0
llm_load_print_meta: ssm_dt_b_c_rms = 0
llm_load_print_meta: model type = ?B
llm_load_print_meta: model ftype = Q6_K
llm_load_print_meta: model params = 2.25 B
llm_load_print_meta: model size = 2.36 GiB (8.99 BPW)
llm_load_print_meta: general.name = n/a
llm_load_print_meta: BOS token = 1 '<s>'
llm_load_print_meta: EOS token = 2 '</s>'
llm_load_print_meta: UNK token = 0 '<unk>'
llm_load_print_meta: LF token = 145 '<0x0A>'
llm_load_print_meta: EOT token = 5 '<|im_end|>'
llm_load_print_meta: EOG token = 2 '</s>'
llm_load_print_meta: EOG token = 5 '<|im_end|>'
llm_load_print_meta: max token length = 72
llm_load_tensors: ggml ctx size = 0.20 MiB
llm_load_tensors: offloading 24 repeating layers to GPU
llm_load_tensors: offloading non-repeating layers to GPU
llm_load_tensors: offloaded 25/25 layers to GPU
llm_load_tensors: Metal buffer size = 2414.84 MiB
llm_load_tensors: CPU buffer size = 410.16 MiB
............................................
llama_new_context_with_model: n_ctx = 8192
llama_new_context_with_model: n_batch = 512
llama_new_context_with_model: n_ubatch = 128
llama_new_context_with_model: flash_attn = 0
llama_new_context_with_model: freq_base = 10000.0
llama_new_context_with_model: freq_scale = 1
ggml_metal_init: allocating
ggml_metal_init: found device: Apple M3 Max
ggml_metal_init: picking default device: Apple M3 Max
ggml_metal_init: using embedded metal library
ggml_metal_init: GPU name: Apple M3 Max
ggml_metal_init: GPU family: MTLGPUFamilyApple9 (1009)
ggml_metal_init: GPU family: MTLGPUFamilyCommon3 (3003)
ggml_metal_init: GPU family: MTLGPUFamilyMetal3 (5001)
ggml_metal_init: simdgroup reduction support = true
ggml_metal_init: simdgroup matrix mul. support = true
ggml_metal_init: hasUnifiedMemory = true
ggml_metal_init: recommendedMaxWorkingSetSize = 42949.67 MB
llama_kv_cache_init: Metal KV buffer size = 1536.00 MiB
llama_new_context_with_model: KV self size = 1536.00 MiB, K (f16): 768.00 MiB, V (f16): 768.00 MiB
llama_new_context_with_model: CPU output buffer size = 0.98 MiB
llama_new_context_with_model: Metal compute buffer size = 72.00 MiB
llama_new_context_with_model: CPU compute buffer size = 125.00 MiB
llama_new_context_with_model: graph nodes = 774
llama_new_context_with_model: graph splits = 3
common_init_from_params: warming up the model with an empty run - please wait ... (--no-warmup to disable)
system_info: n_threads = 15 (n_threads_batch = 15) / 16 | AVX = 0 | AVX_VNNI = 0 | AVX2 = 0 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | AVX512_BF16 = 0 | FMA = 0 | NEON = 1 | SVE = 0 | ARM_FMA = 1 | F16C = 0 | FP16_VA = 1 | RISCV_VECT = 0 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 0 | SSSE3 = 0 | VSX = 0 | MATMUL_INT8 = 1 | LLAMAFILE = 1 |
perplexity: tokenizing the input ..
perplexity: tokenization took 2907.01 ms
perplexity: calculating perplexity over 134 chunks, n_ctx=8192, batch_size=512, n_seq=1
perplexity: 11.99 seconds per pass - ETA 26.77 minutes
[1]15.5671,[2]15.5354,[3]14.1867,[4]14.0224,[5]13.4438,[6]13.0938,[7]13.7932,[8]13.2838,[9]13.0506,[10]12.4478,[11]13.0355,[12]13.1075,[13]14.0250,[14]14.2661,[15]14.2868,[16]14.7865,[17]15.0463,[18]14.9830,[19]15.0321,[20]15.3132,[21]15.3566,[22]13.6058,[23]13.7680,[24]13.4281,[25]12.9772,[26]12.5977,[27]12.4350,[28]12.2870,[29]12.2463,[30]12.0709,[31]12.2743,[32]12.3790,[33]12.7896,[34]13.0686,[35]13.3367,[36]13.1396,[37]13.1353,[38]13.2030,[39]13.0716,[40]13.0978,[41]13.0698,[42]12.9100,[43]12.8635,[44]13.0151,[45]13.1980,[46]13.0664,[47]13.2807,[48]13.3847,[49]13.6334,[50]13.8391,[51]13.8718,[52]14.0530,[53]14.3267,[54]14.6003,[55]14.6927,[56]14.5439,[57]14.4517,[58]14.2199,[59]14.1263,[60]13.9513,[61]13.9924,[62]14.1084,[63]14.2678,[64]14.3140,[65]14.3323,[66]14.4939,[67]14.4779,[68]14.3858,[69]14.2657,[70]14.1754,[71]14.1734,[72]14.1275,[73]14.1363,[74]14.0836,[75]14.0393,[76]13.9844,[77]14.0350,[78]14.0318,[79]14.0402,[80]14.0696,[81]13.8431,[82]13.8301,[83]13.7174,[84]13.7453,[85]13.7864,[86]13.9579,[87]13.9783,[88]14.1129,[89]14.1591,[90]14.2673,[91]14.3161,[92]14.1768,[93]14.2359,[94]14.2260,[95]14.3460,[96]14.5153,[97]14.5796,[98]14.6642,[99]14.7659,[100]14.8039,[101]14.8319,[102]14.8008,[103]14.7748,[104]14.7584,[105]14.7434,[106]14.6296,[107]14.5192,[108]14.5776,[109]14.5968,[110]14.5177,[111]14.4863,[112]14.3496,[113]14.2286,[114]14.2231,[115]14.1994,[116]14.2082,[117]14.1157,[118]14.0027,[119]14.0001,[120]14.0533,[121]14.0691,[122]14.0888,[123]14.1212,[124]14.1350,[125]14.1309,[126]14.1538,[127]14.1773,[128]14.2468,[129]14.2379,[130]14.2192,[131]14.2682,[132]14.2462,[133]14.1989,[134]14.0675,
Final estimate: PPL = 14.0675 +/- 0.05597
llama_perf_context_print: load time = 1476.34 ms
llama_perf_context_print: prompt eval time = 1558050.78 ms / 1097728 tokens ( 1.42 ms per token, 704.55 tokens per second)
llama_perf_context_print: eval time = 0.00 ms / 1 runs ( 0.00 ms per token, inf tokens per second)
llama_perf_context_print: total time = 1614454.29 ms / 1097729 tokens
ggml_metal_free: deallocating
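
Note: the command line that produced this log is not recorded above. A minimal sketch of an equivalent llama.cpp invocation, assuming the quantized GGUF and a local evaluation text file (the file name eval-corpus.txt is a placeholder, not the actual dataset), might look like:

  ./llama-perplexity \
    -m salamandra-2b_Q6_K.gguf \
    -f eval-corpus.txt \
    -c 8192 -b 512 -ub 128 \
    -ngl 99

The -c 8192, -b 512 and -ub 128 values match the n_ctx, n_batch and n_ubatch reported by llama_new_context_with_model; -ngl 99 requests full GPU offload, consistent with "offloaded 25/25 layers to GPU" in the load log.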