SmolLM-135M-FakyPedia-EngHeb-BF16.gguf
- .gitattributes +1 -0
- SmolLM-135M-FakyPedia-EngHeb-BF16.gguf +3 -0
- config.json +1 -1
- model_config.json +49 -0
- tokenizer_config.json +1 -1
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+SmolLM-135M-FakyPedia-EngHeb-BF16.gguf filter=lfs diff=lfs merge=lfs -text
SmolLM-135M-FakyPedia-EngHeb-BF16.gguf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:97f08de7087888b1abba1ea5d71c93748bc30e0ac264947e88a61fd9ef247c62
+size 286846848
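The three added lines are a Git LFS pointer, not the model weights themselves; the actual 286,846,848-byte GGUF is stored in LFS under the sha256 above. A minimal Python sketch for fetching the file and checking it against this pointer — the repo id here is an assumption, substitute the repository this commit actually belongs to:

import hashlib
import os

from huggingface_hub import hf_hub_download

# Assumed repo id -- adjust to wherever this GGUF is actually hosted.
REPO_ID = "Norod78/SmolLM-135M-FakyPedia-EngHeb-BF16-GGUF"
FILENAME = "SmolLM-135M-FakyPedia-EngHeb-BF16.gguf"

# Taken verbatim from the LFS pointer above.
EXPECTED_SHA256 = "97f08de7087888b1abba1ea5d71c93748bc30e0ac264947e88a61fd9ef247c62"
EXPECTED_SIZE = 286_846_848

path = hf_hub_download(repo_id=REPO_ID, filename=FILENAME)

digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        digest.update(chunk)

assert os.path.getsize(path) == EXPECTED_SIZE, "size does not match the pointer"
assert digest.hexdigest() == EXPECTED_SHA256, "sha256 does not match the pointer"
print("GGUF content matches the LFS pointer")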
config.json
CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "Norod78/SmolLM-135M-
+  "_name_or_path": "Norod78/SmolLM-135M-FakyPedia-EngHeb",
   "architectures": [
     "LlamaForCausalLM"
   ],
model_config.json
ADDED
@@ -0,0 +1,49 @@
+{
+  "name": "Config for Chat ID 1723546179511",
+  "load_params": {
+    "n_ctx": 512,
+    "n_batch": 512,
+    "rope_freq_base": 0,
+    "rope_freq_scale": 0,
+    "n_gpu_layers": 20,
+    "use_mlock": true,
+    "main_gpu": 0,
+    "tensor_split": [
+      0
+    ],
+    "seed": -1,
+    "f16_kv": true,
+    "use_mmap": true,
+    "no_kv_offload": false,
+    "num_experts_used": 0
+  },
+  "inference_params": {
+    "n_threads": 4,
+    "n_predict": -1,
+    "top_k": 30,
+    "min_p": 0,
+    "top_p": 1,
+    "temp": 0.5,
+    "repeat_penalty": 1.2,
+    "input_prefix": "<|endoftext|>\\%",
+    "input_suffix": "",
+    "antiprompt": [],
+    "pre_prompt": "Below is an instruction that describes a task. Write a response that appropriately completes the request.",
+    "pre_prompt_suffix": "",
+    "pre_prompt_prefix": "",
+    "seed": -1,
+    "tfs_z": 1,
+    "typical_p": 1,
+    "repeat_last_n": 64,
+    "frequency_penalty": 0,
+    "presence_penalty": 0,
+    "n_keep": 0,
+    "logit_bias": {},
+    "mirostat": 0,
+    "mirostat_tau": 5,
+    "mirostat_eta": 0.1,
+    "memory_f16": true,
+    "multiline_input": false,
+    "penalize_nl": true
+  }
+}
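model_config.json carries loader and sampler settings in the style of an LM Studio / llama.cpp chat preset. A hedged sketch of the same settings expressed through llama-cpp-python — the keyword names follow that binding, not necessarily the application this file was exported from, and the title in the prompt is made up:

from llama_cpp import Llama

# "load_params" above, translated to llama-cpp-python keyword arguments.
llm = Llama(
    model_path="SmolLM-135M-FakyPedia-EngHeb-BF16.gguf",
    n_ctx=512,
    n_batch=512,
    n_gpu_layers=20,
    main_gpu=0,
    use_mlock=True,
    use_mmap=True,
    seed=-1,               # -1: a fresh random seed per run
    n_threads=4,           # from "inference_params"
)

# "inference_params" above, translated to the sampling arguments of Llama.__call__.
out = llm(
    "<|endoftext|>\\%Example title\n",  # input_prefix plus an example title
    max_tokens=128,        # n_predict=-1 means "until EOS"; bounded here for the demo
    top_k=30,
    top_p=1.0,
    min_p=0.0,
    temperature=0.5,
    repeat_penalty=1.2,
)
print(out["choices"][0]["text"])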
tokenizer_config.json
CHANGED
@@ -159,7 +159,7 @@
     "<empty_output>"
   ],
   "bos_token": "<|endoftext|>",
-  "chat_template": "{% for message in messages %}{{'<|
+  "chat_template": "{% for message in messages %}{{'<|endoftext|>\\% + message['role'] + '\n' + message['content'] + '<|endoftext|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
   "clean_up_tokenization_spaces": false,
   "eos_token": "<|endoftext|>",
   "errors": "replace",
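The new chat_template prefixes each turn with <|endoftext|>\% — the same marker as input_prefix in model_config.json above. As committed, the Jinja string looks unbalanced (the quote opened before \% never closes), so the following is only a rough Python rendering of its apparent intent, not the template as the tokenizer would execute it:

EOT = "<|endoftext|>"

def build_prompt(messages, add_generation_prompt=True):
    # Apparent intent of the template: each turn becomes
    # "<|endoftext|>\%" + role + "\n" + content + "<|endoftext|>" + "\n",
    # followed by an "<|im_start|>assistant" header when generation is requested.
    parts = []
    for m in messages:
        parts.append(EOT + "\\%" + m["role"] + "\n" + m["content"] + EOT + "\n")
    if add_generation_prompt:
        parts.append("<|im_start|>assistant\n")
    return "".join(parts)

print(build_prompt([{"role": "user", "content": "Write a FakyPedia entry about cats."}]))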