Text Generation
Transformers
PyTorch
Safetensors
English
llama
conversational
text-generation-inference
Inference Endpoints
hamishivi alvarobartt HF staff committed on
Commit
424fbfc
1 Parent(s): d9a7ee8

Add indentation (4 spaces) to `tokenizer_config.json` for readability (#5)

Browse files

- Add indentation (4 spaces) to `tokenizer_config.json` for readability (798b2b8eedf2ef4976988c53052dd3d193c3de68)


Co-authored-by: Alvaro Bartolome <[email protected]>

Files changed (1) hide show
  1. tokenizer_config.json +35 -1
tokenizer_config.json CHANGED
@@ -1 +1,35 @@
1
- {"add_bos_token": true, "add_eos_token": false, "model_max_length": 2048, "pad_token": null, "sp_model_kwargs": {}, "tokenizer_class": "LlamaTokenizer", "clean_up_tokenization_spaces": false, "bos_token": {"__type": "AddedToken", "content": "<s>", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false}, "eos_token": {"__type": "AddedToken", "content": "</s>", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false}, "unk_token": {"__type": "AddedToken", "content": "<unk>", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false}}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_bos_token":true,
3
+ "add_eos_token":false,
4
+ "model_max_length":2048,
5
+ "pad_token":null,
6
+ "sp_model_kwargs":{
7
+
8
+ },
9
+ "tokenizer_class":"LlamaTokenizer",
10
+ "clean_up_tokenization_spaces":false,
11
+ "bos_token":{
12
+ "__type":"AddedToken",
13
+ "content":"<s>",
14
+ "lstrip":false,
15
+ "normalized":true,
16
+ "rstrip":false,
17
+ "single_word":false
18
+ },
19
+ "eos_token":{
20
+ "__type":"AddedToken",
21
+ "content":"</s>",
22
+ "lstrip":false,
23
+ "normalized":true,
24
+ "rstrip":false,
25
+ "single_word":false
26
+ },
27
+ "unk_token":{
28
+ "__type":"AddedToken",
29
+ "content":"<unk>",
30
+ "lstrip":false,
31
+ "normalized":true,
32
+ "rstrip":false,
33
+ "single_word":false
34
+ }
35
+ }