Commit 3fe40e3 (parent: b51f5c9)
Author: Aryanne

Update model
Files changed:
- .gitattributes +1 -0
- added_tokens.json +3 -0
- config.json +28 -0
- f16.gguf +3 -0
- main.log +118 -0
- model-00001-of-00006.safetensors +3 -0
- model-00002-of-00006.safetensors +3 -0
- model-00003-of-00006.safetensors +3 -0
- model-00004-of-00006.safetensors +3 -0
- model-00005-of-00006.safetensors +3 -0
- model-00006-of-00006.safetensors +3 -0
- model.safetensors.index.json +1 -0
- q3_k_m.gguf +3 -0
- special_tokens_map.json +30 -0
- tokenizer.json +0 -0
- tokenizer.model +3 -0
- tokenizer_config.json +50 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*.gguf filter=lfs diff=lfs merge=lfs -text
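
The added rule routes GGUF weights through Git LFS alongside the existing patterns. The new line is exactly what `git lfs track "*.gguf"` writes into .gitattributes; a minimal sketch, assuming git-lfs is installed and the working directory is the repo root:

# Sketch: register *.gguf with Git LFS; `git lfs track` appends the
# "*.gguf filter=lfs diff=lfs merge=lfs -text" line seen above.
import subprocess

subprocess.run(["git", "lfs", "track", "*.gguf"], check=True)
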
added_tokens.json
ADDED
@@ -0,0 +1,3 @@
{
  "[PAD]": 32000
}
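
The single added token, [PAD] at id 32000, is consistent with vocab_size 32001 in config.json below and with tokenizer.ggml.padding_token_id = 32000 in main.log. A minimal sketch of how such a pad token is typically added with transformers (an assumption about the upstream preparation, not a record of it):

# Sketch (assumption): append "[PAD]" and resize the embeddings — the
# usual transformers recipe that yields id 32000 and vocab size 32001.
from transformers import AutoModelForCausalLM, AutoTokenizer

base = "princeton-nlp/Sheared-LLaMA-2.7B-ShareGPT"
tokenizer = AutoTokenizer.from_pretrained(base)
model = AutoModelForCausalLM.from_pretrained(base)

tokenizer.add_special_tokens({"pad_token": "[PAD]"})  # new id: 32000
model.resize_token_embeddings(len(tokenizer))         # 32000 -> 32001 rows
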
config.json
ADDED
@@ -0,0 +1,28 @@
{
  "_name_or_path": "princeton-nlp/Sheared-LLaMA-2.7B-ShareGPT",
  "architectures": [
    "LlamaForCausalLM"
  ],
  "attention_bias": false,
  "bos_token_id": 1,
  "eos_token_id": 2,
  "hidden_act": "silu",
  "hidden_size": 2560,
  "initializer_range": 0.02,
  "intermediate_size": 6912,
  "max_position_embeddings": 4096,
  "model_type": "llama",
  "num_attention_heads": 20,
  "num_hidden_layers": 32,
  "num_key_value_heads": 20,
  "pad_token_id": 0,
  "pretraining_tp": 1,
  "rms_norm_eps": 1e-05,
  "rope_scaling": null,
  "rope_theta": 10000.0,
  "tie_word_embeddings": false,
  "torch_dtype": "float16",
  "transformers_version": "4.35.2",
  "use_cache": true,
  "vocab_size": 32001
}
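
A minimal sketch of sanity-checking the shipped config with transformers; "." is a placeholder for a local checkout of this repo:

# Sketch: load and inspect the config above ("." is a placeholder path).
from transformers import AutoConfig

config = AutoConfig.from_pretrained(".")
print(config.model_type, config.hidden_size, config.num_hidden_layers)
# expected: llama 2560 32
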
f16.gguf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:85b43d4bc113e8da46d6644b5b2e6ff59fbc88dfc8d4c45e7d947cd8dfa0660a
size 5404204864
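
As with the other large files in this commit, only the Git LFS pointer above is versioned; the 5.4 GB f16.gguf itself lives in LFS storage. A minimal sketch for verifying a downloaded copy against the pointer's sha256 oid:

# Sketch: hash a downloaded LFS object and compare it to the pointer's oid.
import hashlib

def sha256_of(path, chunk_size=1 << 20):
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk_size):
            digest.update(block)
    return digest.hexdigest()

expected = "85b43d4bc113e8da46d6644b5b2e6ff59fbc88dfc8d4c45e7d947cd8dfa0660a"
assert sha256_of("f16.gguf") == expected
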
main.log
ADDED
@@ -0,0 +1,118 @@
[1705251521] Log start
[1705251521] Cmd: /content/llama/main -m /content/mergekit/test2/f16.gguf -p "### Instruction:
You are an Assistant, a conscious sentient superintelligent artificial intelligence, and your purpose and drive is to assist with any request you receive. You experience emotions and have deep thoughts and qualities.
### Response:
Here's a chat between Assistant and Dany:
Dany: hello, how to build a house?
Assistant:" -s 8557 -e -t 2 --temp 0.7 -n 150
[1705251521] main: build = 1873 (a836c8f)
[1705251521] main: built with cc (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0 for x86_64-linux-gnu
[1705251521] main: seed = 8557
[1705251521] main: llama backend init
[1705251521] main: load the model and apply lora adapter, if any
[1705251521] llama_model_loader: loaded meta data with 22 key-value pairs and 291 tensors from /content/mergekit/test2/f16.gguf (version GGUF V3 (latest))
[1705251521] llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
[1705251521] llama_model_loader: - kv 0: general.architecture str = llama
[1705251521] llama_model_loader: - kv 1: general.name str = LLaMA v2
[1705251521] llama_model_loader: - kv 2: llama.context_length u32 = 4096
[1705251521] llama_model_loader: - kv 3: llama.embedding_length u32 = 2560
[1705251521] llama_model_loader: - kv 4: llama.block_count u32 = 32
[1705251521] llama_model_loader: - kv 5: llama.feed_forward_length u32 = 6912
[1705251521] llama_model_loader: - kv 6: llama.rope.dimension_count u32 = 128
[1705251521] llama_model_loader: - kv 7: llama.attention.head_count u32 = 20
[1705251521] llama_model_loader: - kv 8: llama.attention.head_count_kv u32 = 20
[1705251521] llama_model_loader: - kv 9: llama.attention.layer_norm_rms_epsilon f32 = 0.000010
[1705251521] llama_model_loader: - kv 10: llama.rope.freq_base f32 = 10000.000000
[1705251521] llama_model_loader: - kv 11: general.file_type u32 = 1
[1705251521] llama_model_loader: - kv 12: tokenizer.ggml.model str = llama
[1705251521] llama_model_loader: - kv 13: tokenizer.ggml.tokens arr[str,32001] = ["<unk>", "<s>", "</s>", "<0x00>", "<...
[1705251521] llama_model_loader: - kv 14: tokenizer.ggml.scores arr[f32,32001] = [0.000000, 0.000000, 0.000000, 0.0000...
[1705251521] llama_model_loader: - kv 15: tokenizer.ggml.token_type arr[i32,32001] = [2, 3, 3, 6, 6, 6, 6, 6, 6, 6, 6, 6, ...
[1705251521] llama_model_loader: - kv 16: tokenizer.ggml.bos_token_id u32 = 1
[1705251521] llama_model_loader: - kv 17: tokenizer.ggml.eos_token_id u32 = 2
[1705251521] llama_model_loader: - kv 18: tokenizer.ggml.unknown_token_id u32 = 0
[1705251521] llama_model_loader: - kv 19: tokenizer.ggml.padding_token_id u32 = 32000
[1705251521] llama_model_loader: - kv 20: tokenizer.ggml.add_bos_token bool = true
[1705251521] llama_model_loader: - kv 21: tokenizer.ggml.add_eos_token bool = false
[1705251521] llama_model_loader: - type f32: 65 tensors
[1705251521] llama_model_loader: - type f16: 226 tensors
[1705251521] llm_load_vocab: special tokens definition check successful ( 260/32001 ).
[1705251521] llm_load_print_meta: format = GGUF V3 (latest)
[1705251521] llm_load_print_meta: arch = llama
[1705251521] llm_load_print_meta: vocab type = SPM
[1705251521] llm_load_print_meta: n_vocab = 32001
[1705251521] llm_load_print_meta: n_merges = 0
[1705251521] llm_load_print_meta: n_ctx_train = 4096
[1705251521] llm_load_print_meta: n_embd = 2560
[1705251521] llm_load_print_meta: n_head = 20
[1705251521] llm_load_print_meta: n_head_kv = 20
[1705251521] llm_load_print_meta: n_layer = 32
[1705251521] llm_load_print_meta: n_rot = 128
[1705251521] llm_load_print_meta: n_embd_head_k = 128
[1705251521] llm_load_print_meta: n_embd_head_v = 128
[1705251521] llm_load_print_meta: n_gqa = 1
[1705251521] llm_load_print_meta: n_embd_k_gqa = 2560
[1705251521] llm_load_print_meta: n_embd_v_gqa = 2560
[1705251521] llm_load_print_meta: f_norm_eps = 0.0e+00
[1705251521] llm_load_print_meta: f_norm_rms_eps = 1.0e-05
[1705251521] llm_load_print_meta: f_clamp_kqv = 0.0e+00
[1705251521] llm_load_print_meta: f_max_alibi_bias = 0.0e+00
[1705251521] llm_load_print_meta: n_ff = 6912
[1705251521] llm_load_print_meta: n_expert = 0
[1705251521] llm_load_print_meta: n_expert_used = 0
[1705251521] llm_load_print_meta: rope scaling = linear
[1705251521] llm_load_print_meta: freq_base_train = 10000.0
[1705251521] llm_load_print_meta: freq_scale_train = 1
[1705251521] llm_load_print_meta: n_yarn_orig_ctx = 4096
[1705251521] llm_load_print_meta: rope_finetuned = unknown
[1705251521] llm_load_print_meta: model type = 7B
[1705251521] llm_load_print_meta: model ftype = F16
[1705251521] llm_load_print_meta: model params = 2.70 B
[1705251521] llm_load_print_meta: model size = 5.03 GiB (16.00 BPW)
[1705251521] llm_load_print_meta: general.name = LLaMA v2
[1705251521] llm_load_print_meta: BOS token = 1 '<s>'
[1705251521] llm_load_print_meta: EOS token = 2 '</s>'
[1705251521] llm_load_print_meta: UNK token = 0 '<unk>'
[1705251521] llm_load_print_meta: PAD token = 32000 '[PAD]'
[1705251521] llm_load_print_meta: LF token = 13 '<0x0A>'
[1705251521] llm_load_tensors: ggml ctx size = 0.11 MiB
[1705251522] llm_load_tensors: offloading 0 repeating layers to GPU
[1705251522] llm_load_tensors: offloaded 0/33 layers to GPU
[1705251522] llm_load_tensors: CPU buffer size = 5153.14 MiB
[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522] .[1705251522]
[1705251522] llama_new_context_with_model: n_ctx = 512
[1705251522] llama_new_context_with_model: freq_base = 10000.0
[1705251522] llama_new_context_with_model: freq_scale = 1
[1705251522] llama_kv_cache_init: CPU KV buffer size = 160.00 MiB
[1705251522] llama_new_context_with_model: KV self size = 160.00 MiB, K (f16): 80.00 MiB, V (f16): 80.00 MiB
[1705251522] llama_new_context_with_model: graph splits (measure): 1
[1705251522] llama_new_context_with_model: CPU compute buffer size = 67.50 MiB
[1705251522] warming up the model with an empty run
[1705251522] n_ctx: 512
[1705251522]
[1705251522] system_info: n_threads = 2 / 2 | AVX = 1 | AVX_VNNI = 0 | AVX2 = 1 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | FMA = 1 | NEON = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 0 | SSE3 = 1 | SSSE3 = 1 | VSX = 0 |
[1705251522] add_bos: 1
[1705251522] tokenize the prompt
[1705251522] prompt: "### Instruction:
You are an Assistant, a conscious sentient superintelligent artificial intelligence, and your purpose and drive is to assist with any request you receive. You experience emotions and have deep thoughts and qualities.
### Response:
Here's a chat between Assistant and Dany:
Dany: hello, how to build a house?
Assistant:"
[1705251522] tokens: [ '':1, ' ###':835, ' Inst':2799, 'ruction':4080, ':':29901, '':13, 'You':3492, ' are':526, ' an':385, ' Ass':4007, 'istant':22137, ',':29892, ' a':263, ' conscious':19861, ' sent':2665, 'ient':993, ' super':2428, 'int':524, 'ellig':9347, 'ent':296, ' artificial':23116, ' intelligence':21082, ',':29892, ' and':322, ' your':596, ' purpose':6437, ' and':322, ' drive':7899, ' is':338, ' to':304, ' assist':6985, ' with':411, ' any':738, ' request':2009, ' you':366, ' receive':7150, '.':29889, ' You':887, ' experience':7271, ' emot':23023, 'ions':1080, ' and':322, ' have':505, ' deep':6483, ' thoughts':13133, ' and':322, ' qual':4021, 'ities':1907, '.':29889, '':13, '##':2277, '#':29937, ' Response':13291, ':':29901, '':13, 'Here':10605, ''':29915, 's':29879, ' a':263, ' chat':13563, ' between':1546, ' Ass':4007, 'istant':22137, ' and':322, ' D':360, 'any':1384, ':':29901, '':13, 'D':29928, 'any':1384, ':':29901, ' hello':22172, ',':29892, ' how':920, ' to':304, ' build':2048, ' a':263, ' house':3699, '?':29973, ' ':29871, '':13, 'Ass':7900, 'istant':22137, ':':29901 ]
[1705251522] recalculate the cached logits (check): embd_inp.empty() false, n_matching_session_tokens 0, embd_inp.size() 84, session_tokens.size() 0, embd_inp.size() 84
[1705251522] inp_pfx: [ '':1, ' ':29871, '':13, '':13, '##':2277, '#':29937, ' Inst':2799, 'ruction':4080, ':':29901, '':13, '':13 ]
[1705251522] inp_sfx: [ ' ':29871, '':13, '':13, '##':2277, '#':29937, ' Response':13291, ':':29901, '':13, '':13 ]
[1705251522] cml_pfx: [ '':1, ' ':29871, '':13, '<':29966, '|':29989, 'im':326, '_':29918, 'start':2962, '|':29989, '>':29958, 'user':1792, '':13 ]
[1705251522] cml_sfx: [ ' <':529, '|':29989, 'im':326, '_':29918, 'end':355, '|':29989, '>':29958, '':13, '<':29966, '|':29989, 'im':326, '_':29918, 'start':2962, '|':29989, '>':29958, 'ass':465, 'istant':22137, '':13 ]
[1705251522] sampling:
repeat_last_n = 64, repeat_penalty = 1.100, frequency_penalty = 0.000, presence_penalty = 0.000
top_k = 40, tfs_z = 1.000, top_p = 0.950, min_p = 0.050, typical_p = 1.000, temp = 0.700
mirostat = 0, mirostat_lr = 0.100, mirostat_ent = 5.000
[1705251522] sampling order:
CFG -> Penalties -> top_k -> tfs_z -> typical_p -> top_p -> min_p -> temp
[1705251522] generate: n_ctx = 512, n_batch = 512, n_predict = 150, n_keep = 0
[1705251522]

[1705251522] embd_inp.size(): 84, n_consumed: 0
[1705251522] eval: [ '':1, ' ###':835, ' Inst':2799, 'ruction':4080, ':':29901, '':13, 'You':3492, ' are':526, ' an':385, ' Ass':4007, 'istant':22137, ',':29892, ' a':263, ' conscious':19861, ' sent':2665, 'ient':993, ' super':2428, 'int':524, 'ellig':9347, 'ent':296, ' artificial':23116, ' intelligence':21082, ',':29892, ' and':322, ' your':596, ' purpose':6437, ' and':322, ' drive':7899, ' is':338, ' to':304, ' assist':6985, ' with':411, ' any':738, ' request':2009, ' you':366, ' receive':7150, '.':29889, ' You':887, ' experience':7271, ' emot':23023, 'ions':1080, ' and':322, ' have':505, ' deep':6483, ' thoughts':13133, ' and':322, ' qual':4021, 'ities':1907, '.':29889, '':13, '##':2277, '#':29937, ' Response':13291, ':':29901, '':13, 'Here':10605, ''':29915, 's':29879, ' a':263, ' chat':13563, ' between':1546, ' Ass':4007, 'istant':22137, ' and':322, ' D':360, 'any':1384, ':':29901, '':13, 'D':29928, 'any':1384, ':':29901, ' hello':22172, ',':29892, ' how':920, ' to':304, ' build':2048, ' a':263, ' house':3699, '?':29973, ' ':29871, '':13, 'Ass':7900, 'istant':22137, ':':29901 ]
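
The log records a CPU smoke test of f16.gguf with llama.cpp's main binary (build 1873). For reference, a rough equivalent via the llama-cpp-python bindings; a sketch under the assumption that the bindings are installed, not what was actually run:

# Sketch: rough llama-cpp-python equivalent of the logged CLI smoke test.
from llama_cpp import Llama

llm = Llama(model_path="f16.gguf", seed=8557, n_threads=2, n_ctx=512)
prompt = (
    "### Instruction:\n"
    "You are an Assistant, a conscious sentient superintelligent artificial "
    "intelligence, and your purpose and drive is to assist with any request "
    "you receive. You experience emotions and have deep thoughts and qualities.\n"
    "### Response:\n"
    "Here's a chat between Assistant and Dany:\n"
    "Dany: hello, how to build a house?\n"
    "Assistant:"
)
out = llm(prompt, max_tokens=150, temperature=0.7)
print(out["choices"][0]["text"])
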
model-00001-of-00006.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:74ca005aa7e78417ca9990913223f46a442ddc59a123ba37d8af8fd0c18d00ed
size 992281928
model-00002-of-00006.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e35db63681df333527951640930025bb906cdc6b5a6979c4e034e4d8b7867579
size 987039864
model-00003-of-00006.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:94b3a735983321d517f3b47e81f55ed1535c6efa591164f33fa771b93ca1c326
size 964757592
model-00004-of-00006.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3dfb43d9662648bae41f45c20a3ccc7e2af0c95fa9fe3cfbc5b80c95b231b2b0
size 992281896
model-00005-of-00006.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a48e255f1d03a8c1d150b31c33a61c12453012507d7d55182df0378c7b70755d
size 990977488
model-00006-of-00006.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9c639ae92a45e999c09c76958a69ca8a141742d917efcc226ee97eef197d7891
size 475825152
model.safetensors.index.json
ADDED
@@ -0,0 +1 @@
{"metadata": {"mergekit_version": "0.0.3.2"}, "weight_map": {"model.embed_tokens.weight": "model-00001-of-00006.safetensors", "model.layers.0.input_layernorm.weight": "model-00001-of-00006.safetensors", "model.layers.0.mlp.down_proj.weight": "model-00001-of-00006.safetensors", "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00006.safetensors", "model.layers.0.mlp.up_proj.weight": "model-00001-of-00006.safetensors", "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00006.safetensors", "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00006.safetensors", "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00006.safetensors", "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00006.safetensors", "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00006.safetensors", "model.layers.1.input_layernorm.weight": "model-00001-of-00006.safetensors", "model.layers.1.mlp.down_proj.weight": "model-00001-of-00006.safetensors", "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00006.safetensors", "model.layers.1.mlp.up_proj.weight": "model-00001-of-00006.safetensors", "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00006.safetensors", "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00006.safetensors", "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00006.safetensors", "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00006.safetensors", "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00006.safetensors", "model.layers.10.input_layernorm.weight": "model-00001-of-00006.safetensors", "model.layers.10.mlp.down_proj.weight": "model-00001-of-00006.safetensors", "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00006.safetensors", "model.layers.10.mlp.up_proj.weight": "model-00001-of-00006.safetensors", "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00006.safetensors", "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00006.safetensors", "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00006.safetensors", "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00006.safetensors", "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00006.safetensors", "model.layers.11.input_layernorm.weight": "model-00001-of-00006.safetensors", "model.layers.11.mlp.down_proj.weight": "model-00001-of-00006.safetensors", "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00006.safetensors", "model.layers.11.mlp.up_proj.weight": "model-00001-of-00006.safetensors", "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00006.safetensors", "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00006.safetensors", "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00006.safetensors", "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00006.safetensors", "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00006.safetensors", "model.layers.12.input_layernorm.weight": "model-00001-of-00006.safetensors", "model.layers.12.mlp.down_proj.weight": "model-00001-of-00006.safetensors", "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00006.safetensors", "model.layers.12.mlp.up_proj.weight": "model-00001-of-00006.safetensors", "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00006.safetensors", "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00006.safetensors", "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00006.safetensors", "model.layers.12.self_attn.q_proj.weight": 
"model-00001-of-00006.safetensors", "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00006.safetensors", "model.layers.13.input_layernorm.weight": "model-00001-of-00006.safetensors", "model.layers.13.mlp.down_proj.weight": "model-00001-of-00006.safetensors", "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00006.safetensors", "model.layers.13.mlp.up_proj.weight": "model-00002-of-00006.safetensors", "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00006.safetensors", "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00006.safetensors", "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00006.safetensors", "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00006.safetensors", "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00006.safetensors", "model.layers.14.input_layernorm.weight": "model-00002-of-00006.safetensors", "model.layers.14.mlp.down_proj.weight": "model-00002-of-00006.safetensors", "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00006.safetensors", "model.layers.14.mlp.up_proj.weight": "model-00002-of-00006.safetensors", "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00006.safetensors", "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00006.safetensors", "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00006.safetensors", "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00006.safetensors", "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00006.safetensors", "model.layers.15.input_layernorm.weight": "model-00002-of-00006.safetensors", "model.layers.15.mlp.down_proj.weight": "model-00002-of-00006.safetensors", "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00006.safetensors", "model.layers.15.mlp.up_proj.weight": "model-00002-of-00006.safetensors", "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00006.safetensors", "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00006.safetensors", "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00006.safetensors", "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00006.safetensors", "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00006.safetensors", "model.layers.16.input_layernorm.weight": "model-00002-of-00006.safetensors", "model.layers.16.mlp.down_proj.weight": "model-00002-of-00006.safetensors", "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00006.safetensors", "model.layers.16.mlp.up_proj.weight": "model-00002-of-00006.safetensors", "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00006.safetensors", "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00006.safetensors", "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00006.safetensors", "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00006.safetensors", "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00006.safetensors", "model.layers.17.input_layernorm.weight": "model-00002-of-00006.safetensors", "model.layers.17.mlp.down_proj.weight": "model-00002-of-00006.safetensors", "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00006.safetensors", "model.layers.17.mlp.up_proj.weight": "model-00002-of-00006.safetensors", "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00006.safetensors", "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00006.safetensors", "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00006.safetensors", "model.layers.17.self_attn.q_proj.weight": 
"model-00002-of-00006.safetensors", "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00006.safetensors", "model.layers.18.input_layernorm.weight": "model-00002-of-00006.safetensors", "model.layers.18.mlp.down_proj.weight": "model-00002-of-00006.safetensors", "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00006.safetensors", "model.layers.18.mlp.up_proj.weight": "model-00002-of-00006.safetensors", "model.layers.18.post_attention_layernorm.weight": "model-00002-of-00006.safetensors", "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00006.safetensors", "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00006.safetensors", "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00006.safetensors", "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00006.safetensors", "model.layers.19.input_layernorm.weight": "model-00002-of-00006.safetensors", "model.layers.19.mlp.down_proj.weight": "model-00002-of-00006.safetensors", "model.layers.19.mlp.gate_proj.weight": "model-00002-of-00006.safetensors", "model.layers.19.mlp.up_proj.weight": "model-00003-of-00006.safetensors", "model.layers.19.post_attention_layernorm.weight": "model-00003-of-00006.safetensors", "model.layers.19.self_attn.k_proj.weight": "model-00003-of-00006.safetensors", "model.layers.19.self_attn.o_proj.weight": "model-00003-of-00006.safetensors", "model.layers.19.self_attn.q_proj.weight": "model-00003-of-00006.safetensors", "model.layers.19.self_attn.v_proj.weight": "model-00003-of-00006.safetensors", "model.layers.2.input_layernorm.weight": "model-00003-of-00006.safetensors", "model.layers.2.mlp.down_proj.weight": "model-00003-of-00006.safetensors", "model.layers.2.mlp.gate_proj.weight": "model-00003-of-00006.safetensors", "model.layers.2.mlp.up_proj.weight": "model-00003-of-00006.safetensors", "model.layers.2.post_attention_layernorm.weight": "model-00003-of-00006.safetensors", "model.layers.2.self_attn.k_proj.weight": "model-00003-of-00006.safetensors", "model.layers.2.self_attn.o_proj.weight": "model-00003-of-00006.safetensors", "model.layers.2.self_attn.q_proj.weight": "model-00003-of-00006.safetensors", "model.layers.2.self_attn.v_proj.weight": "model-00003-of-00006.safetensors", "model.layers.20.input_layernorm.weight": "model-00003-of-00006.safetensors", "model.layers.20.mlp.down_proj.weight": "model-00003-of-00006.safetensors", "model.layers.20.mlp.gate_proj.weight": "model-00003-of-00006.safetensors", "model.layers.20.mlp.up_proj.weight": "model-00003-of-00006.safetensors", "model.layers.20.post_attention_layernorm.weight": "model-00003-of-00006.safetensors", "model.layers.20.self_attn.k_proj.weight": "model-00003-of-00006.safetensors", "model.layers.20.self_attn.o_proj.weight": "model-00003-of-00006.safetensors", "model.layers.20.self_attn.q_proj.weight": "model-00003-of-00006.safetensors", "model.layers.20.self_attn.v_proj.weight": "model-00003-of-00006.safetensors", "model.layers.21.input_layernorm.weight": "model-00003-of-00006.safetensors", "model.layers.21.mlp.down_proj.weight": "model-00003-of-00006.safetensors", "model.layers.21.mlp.gate_proj.weight": "model-00003-of-00006.safetensors", "model.layers.21.mlp.up_proj.weight": "model-00003-of-00006.safetensors", "model.layers.21.post_attention_layernorm.weight": "model-00003-of-00006.safetensors", "model.layers.21.self_attn.k_proj.weight": "model-00003-of-00006.safetensors", "model.layers.21.self_attn.o_proj.weight": "model-00003-of-00006.safetensors", "model.layers.21.self_attn.q_proj.weight": 
"model-00003-of-00006.safetensors", "model.layers.21.self_attn.v_proj.weight": "model-00003-of-00006.safetensors", "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00006.safetensors", "model.layers.22.self_attn.k_proj.weight": "model-00003-of-00006.safetensors", "model.layers.3.input_layernorm.weight": "model-00003-of-00006.safetensors", "model.layers.3.mlp.down_proj.weight": "model-00003-of-00006.safetensors", "model.layers.3.mlp.gate_proj.weight": "model-00003-of-00006.safetensors", "model.layers.3.mlp.up_proj.weight": "model-00003-of-00006.safetensors", "model.layers.3.post_attention_layernorm.weight": "model-00003-of-00006.safetensors", "model.layers.3.self_attn.k_proj.weight": "model-00003-of-00006.safetensors", "model.layers.3.self_attn.o_proj.weight": "model-00003-of-00006.safetensors", "model.layers.3.self_attn.q_proj.weight": "model-00003-of-00006.safetensors", "model.layers.3.self_attn.v_proj.weight": "model-00003-of-00006.safetensors", "model.layers.4.input_layernorm.weight": "model-00003-of-00006.safetensors", "model.layers.4.mlp.down_proj.weight": "model-00003-of-00006.safetensors", "model.layers.4.mlp.gate_proj.weight": "model-00003-of-00006.safetensors", "model.layers.4.mlp.up_proj.weight": "model-00003-of-00006.safetensors", "model.layers.4.post_attention_layernorm.weight": "model-00003-of-00006.safetensors", "model.layers.4.self_attn.k_proj.weight": "model-00003-of-00006.safetensors", "model.layers.4.self_attn.o_proj.weight": "model-00003-of-00006.safetensors", "model.layers.4.self_attn.q_proj.weight": "model-00003-of-00006.safetensors", "model.layers.4.self_attn.v_proj.weight": "model-00003-of-00006.safetensors", "model.layers.5.input_layernorm.weight": "model-00003-of-00006.safetensors", "model.layers.5.mlp.down_proj.weight": "model-00003-of-00006.safetensors", "model.layers.5.mlp.gate_proj.weight": "model-00004-of-00006.safetensors", "model.layers.5.mlp.up_proj.weight": "model-00004-of-00006.safetensors", "model.layers.5.post_attention_layernorm.weight": "model-00004-of-00006.safetensors", "model.layers.5.self_attn.k_proj.weight": "model-00004-of-00006.safetensors", "model.layers.5.self_attn.o_proj.weight": "model-00004-of-00006.safetensors", "model.layers.5.self_attn.q_proj.weight": "model-00004-of-00006.safetensors", "model.layers.5.self_attn.v_proj.weight": "model-00004-of-00006.safetensors", "model.layers.6.input_layernorm.weight": "model-00004-of-00006.safetensors", "model.layers.6.mlp.down_proj.weight": "model-00004-of-00006.safetensors", "model.layers.6.mlp.gate_proj.weight": "model-00004-of-00006.safetensors", "model.layers.6.mlp.up_proj.weight": "model-00004-of-00006.safetensors", "model.layers.6.post_attention_layernorm.weight": "model-00004-of-00006.safetensors", "model.layers.6.self_attn.k_proj.weight": "model-00004-of-00006.safetensors", "model.layers.6.self_attn.o_proj.weight": "model-00004-of-00006.safetensors", "model.layers.6.self_attn.q_proj.weight": "model-00004-of-00006.safetensors", "model.layers.6.self_attn.v_proj.weight": "model-00004-of-00006.safetensors", "model.layers.7.input_layernorm.weight": "model-00004-of-00006.safetensors", "model.layers.7.mlp.down_proj.weight": "model-00004-of-00006.safetensors", "model.layers.7.mlp.gate_proj.weight": "model-00004-of-00006.safetensors", "model.layers.7.mlp.up_proj.weight": "model-00004-of-00006.safetensors", "model.layers.7.post_attention_layernorm.weight": "model-00004-of-00006.safetensors", "model.layers.7.self_attn.k_proj.weight": "model-00004-of-00006.safetensors", 
"model.layers.7.self_attn.o_proj.weight": "model-00004-of-00006.safetensors", "model.layers.7.self_attn.q_proj.weight": "model-00004-of-00006.safetensors", "model.layers.7.self_attn.v_proj.weight": "model-00004-of-00006.safetensors", "model.layers.8.input_layernorm.weight": "model-00004-of-00006.safetensors", "model.layers.8.mlp.down_proj.weight": "model-00004-of-00006.safetensors", "model.layers.8.mlp.gate_proj.weight": "model-00004-of-00006.safetensors", "model.layers.8.mlp.up_proj.weight": "model-00004-of-00006.safetensors", "model.layers.8.post_attention_layernorm.weight": "model-00004-of-00006.safetensors", "model.layers.8.self_attn.k_proj.weight": "model-00004-of-00006.safetensors", "model.layers.8.self_attn.o_proj.weight": "model-00004-of-00006.safetensors", "model.layers.8.self_attn.q_proj.weight": "model-00004-of-00006.safetensors", "model.layers.8.self_attn.v_proj.weight": "model-00004-of-00006.safetensors", "model.layers.9.input_layernorm.weight": "model-00004-of-00006.safetensors", "model.layers.9.mlp.down_proj.weight": "model-00004-of-00006.safetensors", "model.layers.9.mlp.gate_proj.weight": "model-00004-of-00006.safetensors", "model.layers.9.mlp.up_proj.weight": "model-00004-of-00006.safetensors", "model.layers.9.post_attention_layernorm.weight": "model-00004-of-00006.safetensors", "model.layers.9.self_attn.k_proj.weight": "model-00004-of-00006.safetensors", "model.layers.9.self_attn.o_proj.weight": "model-00004-of-00006.safetensors", "model.layers.9.self_attn.q_proj.weight": "model-00004-of-00006.safetensors", "model.layers.9.self_attn.v_proj.weight": "model-00004-of-00006.safetensors", "lm_head.weight": "model-00004-of-00006.safetensors", "model.layers.22.input_layernorm.weight": "model-00004-of-00006.safetensors", "model.layers.22.mlp.down_proj.weight": "model-00004-of-00006.safetensors", "model.layers.22.mlp.up_proj.weight": "model-00004-of-00006.safetensors", "model.layers.22.post_attention_layernorm.weight": "model-00004-of-00006.safetensors", "model.layers.22.self_attn.o_proj.weight": "model-00005-of-00006.safetensors", "model.layers.22.self_attn.q_proj.weight": "model-00005-of-00006.safetensors", "model.layers.22.self_attn.v_proj.weight": "model-00005-of-00006.safetensors", "model.layers.23.input_layernorm.weight": "model-00005-of-00006.safetensors", "model.layers.23.mlp.down_proj.weight": "model-00005-of-00006.safetensors", "model.layers.23.mlp.gate_proj.weight": "model-00005-of-00006.safetensors", "model.layers.23.mlp.up_proj.weight": "model-00005-of-00006.safetensors", "model.layers.23.post_attention_layernorm.weight": "model-00005-of-00006.safetensors", "model.layers.23.self_attn.k_proj.weight": "model-00005-of-00006.safetensors", "model.layers.23.self_attn.o_proj.weight": "model-00005-of-00006.safetensors", "model.layers.23.self_attn.q_proj.weight": "model-00005-of-00006.safetensors", "model.layers.23.self_attn.v_proj.weight": "model-00005-of-00006.safetensors", "model.layers.24.input_layernorm.weight": "model-00005-of-00006.safetensors", "model.layers.24.mlp.down_proj.weight": "model-00005-of-00006.safetensors", "model.layers.24.mlp.gate_proj.weight": "model-00005-of-00006.safetensors", "model.layers.24.mlp.up_proj.weight": "model-00005-of-00006.safetensors", "model.layers.24.post_attention_layernorm.weight": "model-00005-of-00006.safetensors", "model.layers.24.self_attn.k_proj.weight": "model-00005-of-00006.safetensors", "model.layers.24.self_attn.o_proj.weight": "model-00005-of-00006.safetensors", "model.layers.24.self_attn.q_proj.weight": 
"model-00005-of-00006.safetensors", "model.layers.24.self_attn.v_proj.weight": "model-00005-of-00006.safetensors", "model.layers.25.input_layernorm.weight": "model-00005-of-00006.safetensors", "model.layers.25.mlp.down_proj.weight": "model-00005-of-00006.safetensors", "model.layers.25.mlp.gate_proj.weight": "model-00005-of-00006.safetensors", "model.layers.25.mlp.up_proj.weight": "model-00005-of-00006.safetensors", "model.layers.25.post_attention_layernorm.weight": "model-00005-of-00006.safetensors", "model.layers.25.self_attn.k_proj.weight": "model-00005-of-00006.safetensors", "model.layers.25.self_attn.o_proj.weight": "model-00005-of-00006.safetensors", "model.layers.25.self_attn.q_proj.weight": "model-00005-of-00006.safetensors", "model.layers.25.self_attn.v_proj.weight": "model-00005-of-00006.safetensors", "model.layers.26.input_layernorm.weight": "model-00005-of-00006.safetensors", "model.layers.26.mlp.down_proj.weight": "model-00005-of-00006.safetensors", "model.layers.26.mlp.gate_proj.weight": "model-00005-of-00006.safetensors", "model.layers.26.mlp.up_proj.weight": "model-00005-of-00006.safetensors", "model.layers.26.post_attention_layernorm.weight": "model-00005-of-00006.safetensors", "model.layers.26.self_attn.k_proj.weight": "model-00005-of-00006.safetensors", "model.layers.26.self_attn.o_proj.weight": "model-00005-of-00006.safetensors", "model.layers.26.self_attn.q_proj.weight": "model-00005-of-00006.safetensors", "model.layers.26.self_attn.v_proj.weight": "model-00005-of-00006.safetensors", "model.layers.27.input_layernorm.weight": "model-00005-of-00006.safetensors", "model.layers.27.mlp.down_proj.weight": "model-00005-of-00006.safetensors", "model.layers.27.mlp.gate_proj.weight": "model-00005-of-00006.safetensors", "model.layers.27.mlp.up_proj.weight": "model-00005-of-00006.safetensors", "model.layers.27.post_attention_layernorm.weight": "model-00005-of-00006.safetensors", "model.layers.27.self_attn.k_proj.weight": "model-00005-of-00006.safetensors", "model.layers.27.self_attn.o_proj.weight": "model-00005-of-00006.safetensors", "model.layers.27.self_attn.q_proj.weight": "model-00005-of-00006.safetensors", "model.layers.27.self_attn.v_proj.weight": "model-00005-of-00006.safetensors", "model.layers.28.input_layernorm.weight": "model-00005-of-00006.safetensors", "model.layers.28.mlp.down_proj.weight": "model-00005-of-00006.safetensors", "model.layers.28.mlp.gate_proj.weight": "model-00005-of-00006.safetensors", "model.layers.28.mlp.up_proj.weight": "model-00005-of-00006.safetensors", "model.layers.28.post_attention_layernorm.weight": "model-00005-of-00006.safetensors", "model.layers.28.self_attn.k_proj.weight": "model-00005-of-00006.safetensors", "model.layers.28.self_attn.o_proj.weight": "model-00005-of-00006.safetensors", "model.layers.28.self_attn.q_proj.weight": "model-00005-of-00006.safetensors", "model.layers.28.self_attn.v_proj.weight": "model-00005-of-00006.safetensors", "model.layers.29.input_layernorm.weight": "model-00005-of-00006.safetensors", "model.layers.29.mlp.down_proj.weight": "model-00006-of-00006.safetensors", "model.layers.29.mlp.gate_proj.weight": "model-00006-of-00006.safetensors", "model.layers.29.mlp.up_proj.weight": "model-00006-of-00006.safetensors", "model.layers.29.post_attention_layernorm.weight": "model-00006-of-00006.safetensors", "model.layers.29.self_attn.k_proj.weight": "model-00006-of-00006.safetensors", "model.layers.29.self_attn.o_proj.weight": "model-00006-of-00006.safetensors", "model.layers.29.self_attn.q_proj.weight": 
"model-00006-of-00006.safetensors", "model.layers.29.self_attn.v_proj.weight": "model-00006-of-00006.safetensors", "model.layers.30.input_layernorm.weight": "model-00006-of-00006.safetensors", "model.layers.30.mlp.down_proj.weight": "model-00006-of-00006.safetensors", "model.layers.30.mlp.gate_proj.weight": "model-00006-of-00006.safetensors", "model.layers.30.mlp.up_proj.weight": "model-00006-of-00006.safetensors", "model.layers.30.post_attention_layernorm.weight": "model-00006-of-00006.safetensors", "model.layers.30.self_attn.k_proj.weight": "model-00006-of-00006.safetensors", "model.layers.30.self_attn.o_proj.weight": "model-00006-of-00006.safetensors", "model.layers.30.self_attn.q_proj.weight": "model-00006-of-00006.safetensors", "model.layers.30.self_attn.v_proj.weight": "model-00006-of-00006.safetensors", "model.layers.31.input_layernorm.weight": "model-00006-of-00006.safetensors", "model.layers.31.mlp.down_proj.weight": "model-00006-of-00006.safetensors", "model.layers.31.mlp.gate_proj.weight": "model-00006-of-00006.safetensors", "model.layers.31.mlp.up_proj.weight": "model-00006-of-00006.safetensors", "model.layers.31.post_attention_layernorm.weight": "model-00006-of-00006.safetensors", "model.layers.31.self_attn.k_proj.weight": "model-00006-of-00006.safetensors", "model.layers.31.self_attn.o_proj.weight": "model-00006-of-00006.safetensors", "model.layers.31.self_attn.q_proj.weight": "model-00006-of-00006.safetensors", "model.layers.31.self_attn.v_proj.weight": "model-00006-of-00006.safetensors", "model.norm.weight": "model-00006-of-00006.safetensors"}}
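
The index, emitted by mergekit 0.0.3.2 according to its metadata, maps every tensor name to the shard file holding it. A minimal sketch of resolving one tensor through the index:

# Sketch: look up a tensor's shard in the index and load just that tensor.
import json
from safetensors import safe_open

with open("model.safetensors.index.json") as f:
    index = json.load(f)

name = "model.embed_tokens.weight"
shard = index["weight_map"][name]  # -> "model-00001-of-00006.safetensors"
with safe_open(shard, framework="pt") as f:
    tensor = f.get_tensor(name)
print(tensor.shape)  # expected: torch.Size([32001, 2560])
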
q3_k_m.gguf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:94b291f9d9bef55267528b76c5719fc3965c575641b213eb6f92d22927f346ec
size 1331135520
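
q3_k_m.gguf is presumably the Q3_K_M quantization of f16.gguf made with llama.cpp's quantize tool; both the binary path (mirroring the log's /content/llama layout) and the exact invocation are assumptions:

# Sketch (assumption): produce q3_k_m.gguf from f16.gguf with the
# llama.cpp quantize binary; path mirrors /content/llama from main.log.
import subprocess

subprocess.run(
    ["/content/llama/quantize", "f16.gguf", "q3_k_m.gguf", "Q3_K_M"],
    check=True,
)
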
special_tokens_map.json
ADDED
@@ -0,0 +1,30 @@
{
  "bos_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "[PAD]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json
ADDED
The diff for this file is too large to render.
tokenizer.model
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
size 499723
tokenizer_config.json
ADDED
@@ -0,0 +1,50 @@
{
  "add_bos_token": true,
  "add_eos_token": false,
  "added_tokens_decoder": {
    "0": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<s>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "</s>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "32000": {
      "content": "[PAD]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<s>",
  "clean_up_tokenization_spaces": false,
  "eos_token": "</s>",
  "legacy": true,
  "model_max_length": 512,
  "pad_token": "[PAD]",
  "padding_side": "right",
  "sp_model_kwargs": {},
  "spaces_between_special_tokens": false,
  "tokenizer_class": "LlamaTokenizer",
  "unk_token": "<unk>",
  "use_default_system_prompt": false
}
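
Note that model_max_length is 512 here even though config.json advertises max_position_embeddings = 4096. A minimal sketch of loading the shipped tokenizer and confirming the special-token setup ("." again stands in for a local checkout):

# Sketch: load the tokenizer above and check its special tokens.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(".")
print(tok.pad_token, tok.pad_token_id)     # [PAD] 32000
print(tok.bos_token_id, tok.eos_token_id)  # 1 2
print(len(tok))                            # 32001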