Upload tokenizer
- README.md +3 -3
- tokenizer.json +0 -0
- tokenizer_config.json +0 -2
README.md
CHANGED
@@ -1,10 +1,10 @@
 ---
-license: apache-2.0
 base_model: google-bert/bert-base-uncased
-tags:
-- generated_from_trainer
+license: apache-2.0
 metrics:
 - accuracy
+tags:
+- generated_from_trainer
 model-index:
 - name: phishing-IBM
   results: []
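This commit only reorders the README's YAML metadata keys; no values change. As a sanity check, the metadata can be read back with the huggingface_hub client. A minimal sketch, assuming a hypothetical repo id built from the model name in the README (substitute the real namespace):

from huggingface_hub import ModelCard

# "<user>/phishing-IBM" is a hypothetical repo id; replace it with the
# actual Hub path of this model.
card = ModelCard.load("<user>/phishing-IBM")
print(card.data.license)     # apache-2.0
print(card.data.base_model)  # google-bert/bert-base-uncased
print(card.data.tags)        # ['generated_from_trainer']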
tokenizer.json
ADDED
The diff for this file is too large to render.
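Since the rendered diff is unavailable, the added file can be inspected locally instead. A minimal sketch, assuming the huggingface_hub and tokenizers packages and the same hypothetical repo id as above:

from huggingface_hub import hf_hub_download
from tokenizers import Tokenizer

# Download the newly added tokenizer.json and load it as a fast tokenizer.
# "<user>/phishing-IBM" is a hypothetical repo id; replace it with the real one.
path = hf_hub_download("<user>/phishing-IBM", "tokenizer.json")
tok = Tokenizer.from_file(path)
print(tok.get_vocab_size())  # 30522, the bert-base-uncased vocabulary
print(tok.encode("verify your account").tokens)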
tokenizer_config.json
CHANGED
@@ -43,11 +43,9 @@
   },
   "clean_up_tokenization_spaces": true,
   "cls_token": "[CLS]",
-  "do_basic_tokenize": true,
   "do_lower_case": true,
   "mask_token": "[MASK]",
   "model_max_length": 512,
-  "never_split": null,
   "pad_token": "[PAD]",
   "sep_token": "[SEP]",
   "strip_accents": null,