prince-canuma committed
Commit 6a30415 • 1 Parent(s): 686f049

Upload folder using huggingface_hub

Files changed:
- README.md +4 -1
- chat_template.json +3 -0
- model-00001-of-00002.safetensors +1 -1
- model-00002-of-00002.safetensors +1 -1
- processor_config.json +6 -0
- tokenizer.json +0 -0
- tokenizer_config.json +2 -0
README.md CHANGED
@@ -1,5 +1,8 @@
 ---
+language:
+- en
 license: apache-2.0
+pipeline_tag: image-text-to-text
 tags:
 - vision
 - image-text-to-text
@@ -7,7 +10,7 @@ tags:
 ---
 
 # mlx-community/llava-v1.6-mistral-7b-8bit
-This model was converted to MLX format from [`llava-hf/llava-v1.6-mistral-7b-hf`]() using mlx-vlm version **0.0.
+This model was converted to MLX format from [`llava-hf/llava-v1.6-mistral-7b-hf`]() using mlx-vlm version **0.0.15**.
 Refer to the [original model card](https://huggingface.co/llava-hf/llava-v1.6-mistral-7b-hf) for more details on the model.
 ## Use with mlx
 
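The README's "Use with mlx" section is cut off in this view. As a rough sketch of what usage looks like, the mlx-vlm Python API of that era exposed `load` and `generate`; exact argument names have shifted between releases, so treat the following as illustrative rather than the model card's verbatim instructions.

```python
# Illustrative only: mlx-vlm's load/generate API (signatures have varied
# across 0.0.x releases). Requires Apple Silicon and `pip install mlx-vlm`.
from mlx_vlm import load, generate

# Load the 8-bit MLX weights and matching processor from this repo.
model, processor = load("mlx-community/llava-v1.6-mistral-7b-8bit")

prompt = "[INST] <image>\nWhat is shown in this image? [/INST]"
output = generate(
    model,
    processor,
    "http://images.cocodataset.org/val2017/000000039769.jpg",  # example image
    prompt,
    max_tokens=100,
    verbose=True,
)
print(output)
```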
chat_template.json ADDED
@@ -0,0 +1,3 @@
+{
+  "chat_template": "{% for message in messages %}{% if message['role'] == 'system' %}{{ '<<SYS>>\n' + message['content'][0]['text'] + '\n<</SYS>>\n\n' }}{% elif message['role'] == 'user' %}{{ '[INST] ' }}{# Render all images first #}{% for content in message['content'] | selectattr('type', 'equalto', 'image') %}{{ '<image>\n' }}{% endfor %}{# Render all text next #}{% for content in message['content'] | selectattr('type', 'equalto', 'text') %}{{ content['text'] }}{% endfor %}{{' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ ' ' + message['content'][0]['text'] + '<\\s> '}}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}"
+}
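The new processor-level template renders all image placeholders before any text inside each `[INST]` block. A quick way to see what it produces, without downloading any weights, is to render it directly with jinja2; the message content below is made up for illustration, and the file is assumed to be saved locally.

```python
# Sketch: render the chat template from chat_template.json (above) with
# plain jinja2 to inspect the prompt string it builds.
import json
from jinja2 import Environment

def raise_exception(msg):
    # The template calls raise_exception() for unsupported roles.
    raise ValueError(msg)

with open("chat_template.json") as f:  # assumes the file was downloaded locally
    template_src = json.load(f)["chat_template"]

env = Environment()
env.globals["raise_exception"] = raise_exception
template = env.from_string(template_src)

messages = [
    {"role": "user", "content": [
        {"type": "image"},
        {"type": "text", "text": "What is shown in this image?"},
    ]},
]
print(template.render(messages=messages))
# -> "[INST] <image>\nWhat is shown in this image? [/INST]"
```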
model-00001-of-00002.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:6086edf7dbf7c4cdde9f40e4a4cfb9316b733e40684bd110a980c62b32bf03cc
 size 5356569646
model-00002-of-00002.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:121502f581680b5baca54bf9e98fd7774b1aeed8fd979d85eb975daad07e667d
 size 2684420182
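Both `.safetensors` entries are Git LFS pointer files (`version`, `oid`, `size`), so the commit swaps the pointed-to blobs rather than storing weights in git. A downloaded shard can be checked against its pointer with stock Python; the local path below is an assumption.

```python
# Sketch: verify a downloaded safetensors shard against its Git LFS pointer.
# The expected oid/size come from the diff above; the path is assumed.
import hashlib
import os

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    # Stream the file in 1 MiB chunks so large shards don't load into memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

path = "model-00002-of-00002.safetensors"  # assumed local download
assert os.path.getsize(path) == 2684420182
assert sha256_of(path) == "121502f581680b5baca54bf9e98fd7774b1aeed8fd979d85eb975daad07e667d"
print("shard matches its LFS pointer")
```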
processor_config.json ADDED
@@ -0,0 +1,6 @@
+{
+  "image_token": "<image>",
+  "patch_size": null,
+  "processor_class": "LlavaNextProcessor",
+  "vision_feature_select_strategy": null
+}
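`processor_config.json` is what routes transformers' `AutoProcessor` to the right class, here `LlavaNextProcessor`. A minimal sketch, assuming a transformers version that reads this file; the COCO URL is just a convenient public test image.

```python
# Sketch: AutoProcessor picks up processor_class from processor_config.json.
import requests
from PIL import Image
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("mlx-community/llava-v1.6-mistral-7b-8bit")
print(type(processor).__name__)  # expected: LlavaNextProcessor

image = Image.open(
    requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw
)
# The processor pairs the tokenizer with the image preprocessor; the <image>
# token in the text marks where image features are spliced in.
inputs = processor(images=image, text="[INST] <image>\nDescribe the image. [/INST]")
print(list(inputs.keys()))  # e.g. input_ids, attention_mask, pixel_values, image_sizes
```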
tokenizer.json CHANGED
The diff for this file is too large to render; see the raw diff.
tokenizer_config.json CHANGED
@@ -1,6 +1,7 @@
 {
   "add_bos_token": true,
   "add_eos_token": false,
+  "add_prefix_space": null,
   "added_tokens_decoder": {
     "0": {
       "content": "<unk>",
@@ -48,6 +49,7 @@
   "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token}}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}",
   "clean_up_tokenization_spaces": false,
   "eos_token": "</s>",
+  "legacy": true,
   "max_length": null,
   "model_max_length": 1000000000000000019884624838656,
   "pad_to_multiple_of": null,
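Unlike the processor-level template added above, the tokenizer-level `chat_template` (unchanged in this commit) takes plain-string content and raises unless roles strictly alternate user/assistant. A small sketch of the string it produces:

```python
# Sketch: text-only chat formatting via the tokenizer's own chat_template.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("mlx-community/llava-v1.6-mistral-7b-8bit")

messages = [
    {"role": "user", "content": "Hello!"},
    {"role": "assistant", "content": "Hi, how can I help?"},
    {"role": "user", "content": "Describe MLX in one line."},
]
# tokenize=False returns the rendered prompt string instead of token ids.
prompt = tokenizer.apply_chat_template(messages, tokenize=False)
print(prompt)
# -> "<s>[INST] Hello! [/INST]Hi, how can I help?</s>[INST] Describe MLX in one line. [/INST]"
```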