justicea83 committed
Commit 122f390
Parent(s): 66024d0

Upload tokenizer

Files changed:
- README.md +15 -13
- special_tokens_map.json +24 -0
- tokenizer.json +0 -0
- tokenizer_config.json +42 -0
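If you need to reproduce exactly the tokenizer added here, the files can be pinned to this commit hash with huggingface_hub. This is only a sketch: the repository id below is a placeholder, since the commit page does not show the full repo path.

```python
# Sketch: download the tokenizer files added in commit 122f390 at that exact revision.
# REPO_ID is a placeholder -- replace it with the actual repository id.
from huggingface_hub import hf_hub_download

REPO_ID = "justicea83/your-model-repo"  # placeholder
REVISION = "122f390"                    # commit shown above

for filename in ["special_tokens_map.json", "tokenizer.json", "tokenizer_config.json"]:
    local_path = hf_hub_download(repo_id=REPO_ID, filename=filename, revision=REVISION)
    print(filename, "->", local_path)
```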
README.md CHANGED
@@ -1,25 +1,27 @@
 ---
-extra_gated_heading: Access Llama 2 on Hugging Face
-extra_gated_description: >-
-  This is a form to enable access to Llama 2 on Hugging Face after you have been
-  granted access from Meta. Please visit the [Meta website](https://ai.meta.com/resources/models-and-libraries/llama-downloads) and accept our
-  license terms and acceptable use policy before submitting this form. Requests
-  will be processed in 1-2 days.
-extra_gated_prompt: "**Your Hugging Face account email address MUST match the email you provide on the Meta website, or your request will not be approved.**"
-extra_gated_button_content: Submit
-extra_gated_fields:
-  I agree to share my name, email address and username with Meta and confirm that I have already been granted download access on the Meta website: checkbox
 language:
 - en
-pipeline_tag: text-generation
-inference: false
-arxiv: 2307.09288
 tags:
 - facebook
 - meta
 - pytorch
 - llama
 - llama-2
+extra_gated_heading: Access Llama 2 on Hugging Face
+extra_gated_description: This is a form to enable access to Llama 2 on Hugging Face
+  after you have been granted access from Meta. Please visit the [Meta website](https://ai.meta.com/resources/models-and-libraries/llama-downloads)
+  and accept our license terms and acceptable use policy before submitting this form.
+  Requests will be processed in 1-2 days.
+extra_gated_prompt: '**Your Hugging Face account email address MUST match the email
+  you provide on the Meta website, or your request will not be approved.**'
+extra_gated_button_content: Submit
+extra_gated_fields:
+  ? I agree to share my name, email address and username with Meta and confirm that
+    I have already been granted download access on the Meta website
+  : checkbox
+pipeline_tag: text-generation
+inference: false
+arxiv: 2307.09288
 ---
 # **Llama 2**
 Llama 2 is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. This is the repository for the 7B fine-tuned model, optimized for dialogue use cases and converted for the Hugging Face Transformers format. Links to other models can be found in the index at the bottom.
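The updated card keeps `pipeline_tag: text-generation` and states that the weights are converted for the Hugging Face Transformers format, so the usual loading path applies. A minimal sketch only: the repository id is a placeholder, and because the repo is gated you must already have been granted access and be logged in (for example via `huggingface-cli login`).

```python
# Sketch: load the gated checkpoint once access has been granted on Meta's site and Hugging Face.
# The repository id is a placeholder.
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "justicea83/your-model-repo"  # placeholder
tokenizer = AutoTokenizer.from_pretrained(repo_id)      # picks up the files from this commit
model = AutoModelForCausalLM.from_pretrained(repo_id)   # gated: requires an authenticated session

inputs = tokenizer("Hello, Llama 2!", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```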
special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+{
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<unk>",
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
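When the tokenizer is loaded, these definitions surface as the usual special-token attributes; note that `pad_token` is mapped to `<unk>` rather than a dedicated padding token. A quick check, again with a placeholder repository id:

```python
# Sketch: confirm how special_tokens_map.json is exposed by the loaded tokenizer.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("justicea83/your-model-repo")  # placeholder id
print(tokenizer.bos_token)  # "<s>"
print(tokenizer.eos_token)  # "</s>"
print(tokenizer.unk_token)  # "<unk>"
print(tokenizer.pad_token)  # "<unk>" -- padding reuses the unknown token
```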
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff.
tokenizer_config.json ADDED
@@ -0,0 +1,42 @@
+{
+  "add_bos_token": true,
+  "add_eos_token": false,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<s>",
+  "chat_template": "{% for message in messages %}{% if message['role'] == 'function_metadata' %}LLaMA Correct User: You have access to the following functions. Use them if required:\n\n{{ message['content'] }}\n\n{% elif message['role'] == 'user' and loop.index0 == 1 %}{{ message['content'] }}{{ eos_token }}GPT4 Correct Assistant:\n\n{% elif message['role'] == 'assistant' %}{{ message['content'] }}{{ eos_token }}GPT4 Correct User: {% elif message['role'] == 'function_call' %}Function call: {{ message['content'] }}{{ eos_token }}GPT4 Correct User: {% elif message['role'] == 'function_response' %}Here is the response to the function call. If helpful, use it to respond to my question/request:\n\n{{ message['content'] }}{{ eos_token }}GPT4 Correct Assistant:\n\n{% elif message['role'] == 'user' and loop.index0 != 1 %}{{ message['content'] }}{{ eos_token }}LLaMA Correct Assistant:\n\n{% endif %}{% endfor %}",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "</s>",
+  "legacy": false,
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": "<unk>",
+  "padding_side": "right",
+  "sp_model_kwargs": {},
+  "tokenizer_class": "LlamaTokenizer",
+  "unk_token": "<unk>",
+  "use_default_system_prompt": false
+}
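The `chat_template` expects the conversation to open with a `function_metadata` message (the first user turn is matched at `loop.index0 == 1`) and also recognizes `function_call` and `function_response` roles. A rendering sketch, again with a placeholder repository id:

```python
# Sketch: render a function-calling conversation with the chat_template above.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("justicea83/your-model-repo")  # placeholder id

messages = [
    {"role": "function_metadata", "content": "get_weather(city: str) -> dict"},
    {"role": "user", "content": "What is the weather in Berlin?"},
    {"role": "function_call", "content": '{"name": "get_weather", "arguments": {"city": "Berlin"}}'},
    {"role": "function_response", "content": '{"temperature_c": 18, "sky": "cloudy"}'},
]

prompt = tokenizer.apply_chat_template(messages, tokenize=False)
print(prompt)  # ends with "GPT4 Correct Assistant:" so the model answers using the function result
```

Because `add_bos_token` is true and `add_eos_token` is false, tokenizing the rendered prompt with `tokenizer(prompt)` prepends `<s>` but does not append `</s>`.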