Upload folder using huggingface_hub
#2 by jueduardo - opened
- .gitattributes +1 -0
- Lora_Fused_Model-8.0B-F16.gguf +3 -0
- README.md +26 -1
- config.json +1 -1
- model-00001-of-00004.safetensors +1 -1
- model-00002-of-00004.safetensors +1 -1
- model-00003-of-00004.safetensors +1 -1
- model-00004-of-00004.safetensors +1 -1
- special_tokens_map.json +1 -1
- tokenizer_config.json +2 -1
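The commit title points to huggingface_hub's upload_folder API. Below is a minimal sketch of how a commit like this is typically produced; the local folder path and commit message are assumptions, not values taken from this PR.

```python
from huggingface_hub import HfApi

api = HfApi()  # picks up the auth token from HF_TOKEN or the local login cache

# Upload the whole local folder as a single commit; large binaries
# (safetensors shards, the GGUF) are routed through Git LFS automatically.
api.upload_folder(
    folder_path="./Meta-Llama-3-8B-livro-llm",   # hypothetical local path
    repo_id="jueduardo/Meta-Llama-3-8B-livro-llm",
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)
```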
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+Lora_Fused_Model-8.0B-F16.gguf filter=lfs diff=lfs merge=lfs -text
Lora_Fused_Model-8.0B-F16.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:85eb92c60aebc5fd4f69d04f672baaa745cfc4695b6184e241c3acb7fbe1c207
+size 16068890976
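The new file is a llama.cpp-compatible F16 GGUF export of the LoRA-fused model. A hedged sketch of running it locally with the llama-cpp-python bindings; the local path, context size, and prompt are assumptions.

```python
from llama_cpp import Llama

# Load the F16 GGUF added in this commit (assumed to be in the working directory).
llm = Llama(model_path="Lora_Fused_Model-8.0B-F16.gguf", n_ctx=4096)

out = llm(
    "Summarize what a language model is in one sentence.",
    max_tokens=128,
    stop=["<|eot_id|>", "<|end_of_text|>"],  # Llama 3 end-of-turn / end-of-text tokens
)
print(out["choices"][0]["text"])
```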
README.md CHANGED
@@ -174,11 +174,36 @@ extra_gated_fields:
 extra_gated_description: The information you provide will be collected, stored, processed
   and shared in accordance with the [Meta Privacy Policy](https://www.facebook.com/privacy/policy/).
 extra_gated_button_content: Submit
+widget:
+- example_title: Hello
+  messages:
+  - role: user
+    content: Hey my name is Julien! How are you?
+- example_title: Winter holidays
+  messages:
+  - role: system
+    content: You are a helpful and honest assistant. Please, respond concisely and
+      truthfully.
+  - role: user
+    content: Can you recommend a good destination for Winter holidays?
+- example_title: Programming assistant
+  messages:
+  - role: system
+    content: You are a helpful and honest code and programming assistant. Please,
+      respond concisely and truthfully.
+  - role: user
+    content: Write a function that computes the nth fibonacci number.
+inference:
+  parameters:
+    max_new_tokens: 300
+    stop:
+    - <|end_of_text|>
+    - <|eot_id|>
 ---

 # jueduardo/Meta-Llama-3-8B-livro-llm

-The Model [jueduardo/Meta-Llama-3-8B-livro-llm](https://huggingface.co/jueduardo/Meta-Llama-3-8B-livro-llm) was converted to MLX format from [meta-llama/Meta-Llama-3-8B](https://huggingface.co/meta-llama/Meta-Llama-3-8B) using mlx-lm version **0.15.2**.
+The Model [jueduardo/Meta-Llama-3-8B-livro-llm](https://huggingface.co/jueduardo/Meta-Llama-3-8B-livro-llm) was converted to MLX format from [meta-llama/Meta-Llama-3-8B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct) using mlx-lm version **0.15.2**.

 ## Use with mlx

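The updated README advertises an MLX conversion (mlx-lm 0.15.2) and a "Use with mlx" section. A minimal usage sketch along the lines of the standard mlx-lm snippet; the prompt is an assumption.

```python
# pip install mlx-lm
from mlx_lm import load, generate

# Download the converted weights from the Hub and run a short generation.
model, tokenizer = load("jueduardo/Meta-Llama-3-8B-livro-llm")
response = generate(model, tokenizer, prompt="hello", verbose=True)
```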
config.json CHANGED
@@ -5,7 +5,7 @@
   "attention_bias": false,
   "attention_dropout": 0.0,
   "bos_token_id": 128000,
-  "eos_token_id": 128001,
+  "eos_token_id": 128009,
   "hidden_act": "silu",
   "hidden_size": 4096,
   "initializer_range": 0.02,
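Setting eos_token_id to 128009 switches end-of-sequence handling to the Llama 3 <|eot_id|> turn terminator, which is what the chat template added in tokenizer_config.json below emits. A small check of what that id decodes to, assuming the repo's tokenizer can be downloaded (gated access may apply):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("jueduardo/Meta-Llama-3-8B-livro-llm")
print(tok.convert_ids_to_tokens(128009))  # expected: <|eot_id|>
print(tok.eos_token, tok.eos_token_id)    # should now report <|eot_id|> / 128009
```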
model-00001-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:cc8280d655bdc5fa35c1cf7db17538d3b8d62c9b871545c7f25c44589f0d56a3
 size 5295466428
model-00002-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:8f11aecf22b1fcc8a29da0b6631fc7c84aeda48c77fc58cffedd7da25fe8bf63
 size 5352157832
model-00003-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:66f8139f5e8368c2a252f5d699a4ae4db09676214266b711d4d4b36b10721983
 size 4362258775
model-00004-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:638452373abba2c197f5d86fae5d836e844e2aeffac0ec275b9f6b1162d524a0
 size 1050673279
special_tokens_map.json CHANGED
@@ -7,7 +7,7 @@
     "single_word": false
   },
   "eos_token": {
-    "content": "<|end_of_text|>",
+    "content": "<|eot_id|>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
tokenizer_config.json CHANGED
@@ -2050,8 +2050,9 @@
     }
   },
   "bos_token": "<|begin_of_text|>",
+  "chat_template": "{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}",
   "clean_up_tokenization_spaces": true,
-  "eos_token": "<|end_of_text|>",
+  "eos_token": "<|eot_id|>",
   "model_input_names": [
     "input_ids",
     "attention_mask"
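The added chat_template is the Llama 3 instruct prompt format. A standalone sketch that renders the same Jinja source directly, which mirrors what tokenizer.apply_chat_template does once this file is in place; the example messages are assumptions.

```python
from jinja2 import Template

# Same Jinja source as the "chat_template" value added above, split for readability.
chat_template = (
    "{% set loop_messages = messages %}"
    "{% for message in loop_messages %}"
    "{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'"
    "+ message['content'] | trim + '<|eot_id|>' %}"
    "{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}"
    "{{ content }}"
    "{% endfor %}"
    "{% if add_generation_prompt %}"
    "{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}"
    "{% endif %}"
)

messages = [
    {"role": "system", "content": "You are a helpful and honest assistant."},
    {"role": "user", "content": "Write a function that computes the nth fibonacci number."},
]

# Each turn is wrapped in <|start_header_id|>role<|end_header_id|> ... <|eot_id|>,
# prefixed once with <|begin_of_text|>, then an assistant header is appended.
prompt = Template(chat_template).render(
    messages=messages,
    bos_token="<|begin_of_text|>",
    add_generation_prompt=True,
)
print(prompt)
```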