swritchie committed on
Commit
400802c
1 Parent(s): a601f63

Upload folder using huggingface_hub

README.md ADDED
@@ -0,0 +1,20 @@
+ ---
+ library_name: keras-nlp
+ pipeline_tag: text-classification
+ ---
+ This is a [`Bert` model](https://keras.io/api/keras_nlp/models/bert) uploaded using the KerasNLP library; it can be used with the JAX, TensorFlow, and PyTorch backends.
+ This model is related to a `Classifier` task.
+
+ Model config:
+ * **name:** bert_backbone
+ * **trainable:** True
+ * **vocabulary_size:** 30522
+ * **num_layers:** 2
+ * **num_heads:** 2
+ * **hidden_dim:** 128
+ * **intermediate_dim:** 512
+ * **dropout:** 0.1
+ * **max_sequence_length:** 512
+ * **num_segments:** 2
+
+ This model card has been generated automatically and should be completed by the model author. See [Model Cards documentation](https://huggingface.co/docs/hub/model-cards) for more information.
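The card above describes a small BERT text classifier saved with KerasNLP. A minimal loading sketch, assuming the placeholder handle below is replaced with this repository's actual Hugging Face id (KerasNLP 0.14+ can read Hub repos through the `hf://` scheme):

```python
import keras_nlp

# Hypothetical handle; substitute this repository's actual "user/repo" id.
classifier = keras_nlp.models.BertClassifier.from_preset("hf://<user>/<repo>")

# The bundled preprocessor lets the task accept raw strings; the output is a
# (batch, 2) array of logits, since the saved activation is "linear".
logits = classifier.predict(["An example sentence to classify."])
```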
assets/tokenizer/vocabulary.txt ADDED
The diff for this file is too large to render. See raw diff
 
config.json ADDED
@@ -0,0 +1,17 @@
+ {
+   "module": "keras_nlp.src.models.bert.bert_backbone",
+   "class_name": "BertBackbone",
+   "config": {
+     "name": "bert_backbone",
+     "trainable": true,
+     "vocabulary_size": 30522,
+     "num_layers": 2,
+     "num_heads": 2,
+     "hidden_dim": 128,
+     "intermediate_dim": 512,
+     "dropout": 0.1,
+     "max_sequence_length": 512,
+     "num_segments": 2
+   },
+   "registered_name": "keras_nlp>BertBackbone"
+ }
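`config.json` is a standard Keras 3 serialization of the backbone. As a sketch, the same hyperparameters can be passed straight to the `BertBackbone` constructor; this builds a randomly initialized backbone, while the trained weights live in `model.weights.h5`:

```python
import keras_nlp

# Mirrors the "config" block of config.json.
backbone = keras_nlp.models.BertBackbone(
    vocabulary_size=30522,
    num_layers=2,
    num_heads=2,
    hidden_dim=128,
    intermediate_dim=512,
    dropout=0.1,
    max_sequence_length=512,
    num_segments=2,
)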
metadata.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "keras_version": "3.4.1",
+   "keras_nlp_version": "0.14.4",
+   "parameter_count": 4385920,
+   "date_saved": "2024-08-21@13:41:50"
+ }
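The `parameter_count` can be reproduced from the hyperparameters in `config.json`. A rough accounting, assuming the standard BERT layout (token/position/segment embeddings plus a layer norm, per-layer attention and feed-forward blocks with their layer norms, and a pooler dense):

```python
vocab, hidden, seq, segments, layers, inter = 30522, 128, 512, 2, 2, 512

embeddings = (vocab + seq + segments) * hidden + 2 * hidden       # embeddings + layer norm
attention = 4 * (hidden * hidden + hidden) + 2 * hidden           # Q, K, V, output projections + layer norm
ffn = (hidden * inter + inter) + (inter * hidden + hidden) + 2 * hidden
pooler = hidden * hidden + hidden

print(embeddings + layers * (attention + ffn) + pooler)  # 4385920, matching parameter_count
```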
model.weights.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f4eb3d33c2b852033f1929f1e43df8a1b11e0775f2d493a9c3d35f8731134501
+ size 17632104
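This entry is a Git LFS pointer, not the weights themselves; the actual HDF5 blob is fetched on `git lfs pull` (or handled automatically by `from_preset`). A small sketch of checking a downloaded file against the pointer's oid and size:

```python
import hashlib
import os

path = "model.weights.h5"  # assumes the LFS blob has already been downloaded locally
expected_oid = "f4eb3d33c2b852033f1929f1e43df8a1b11e0775f2d493a9c3d35f8731134501"
expected_size = 17632104

assert os.path.getsize(path) == expected_size
with open(path, "rb") as f:
    assert hashlib.sha256(f.read()).hexdigest() == expected_oid
```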
preprocessor.json ADDED
@@ -0,0 +1,43 @@
+ {
+   "module": "keras_nlp.src.models.bert.bert_preprocessor",
+   "class_name": "BertPreprocessor",
+   "config": {
+     "name": "bert_preprocessor",
+     "trainable": true,
+     "dtype": {
+       "module": "keras",
+       "class_name": "DTypePolicy",
+       "config": {
+         "name": "float32"
+       },
+       "registered_name": null
+     },
+     "tokenizer": {
+       "module": "keras_nlp.src.models.bert.bert_tokenizer",
+       "class_name": "BertTokenizer",
+       "config": {
+         "name": "bert_tokenizer",
+         "trainable": true,
+         "dtype": {
+           "module": "keras",
+           "class_name": "DTypePolicy",
+           "config": {
+             "name": "int32"
+           },
+           "registered_name": null
+         },
+         "vocabulary": null,
+         "sequence_length": null,
+         "lowercase": true,
+         "strip_accents": false,
+         "split": true,
+         "suffix_indicator": "##",
+         "oov_token": "[UNK]"
+       },
+       "registered_name": "keras_nlp>BertTokenizer"
+     },
+     "sequence_length": 512,
+     "truncate": "round_robin"
+   },
+   "registered_name": "keras_nlp>BertPreprocessor"
+ }
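The saved preprocessor wraps the WordPiece tokenizer and packs inputs to a fixed length of 512 with round-robin truncation. A minimal sketch of rebuilding it by hand, assuming the vocabulary file shipped under `assets/tokenizer/`:

```python
import keras_nlp

tokenizer = keras_nlp.models.BertTokenizer(
    vocabulary="assets/tokenizer/vocabulary.txt",
    lowercase=True,
)
preprocessor = keras_nlp.models.BertPreprocessor(
    tokenizer=tokenizer,
    sequence_length=512,
    truncate="round_robin",
)

# Returns a dict with "token_ids", "segment_ids", and "padding_mask",
# each shaped (batch, 512).
features = preprocessor(["The quick brown fox."])
```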
task.json ADDED
@@ -0,0 +1,71 @@
+ {
+   "module": "keras_nlp.src.models.bert.bert_classifier",
+   "class_name": "BertClassifier",
+   "config": {
+     "backbone": {
+       "module": "keras_nlp.src.models.bert.bert_backbone",
+       "class_name": "BertBackbone",
+       "config": {
+         "name": "bert_backbone",
+         "trainable": true,
+         "vocabulary_size": 30522,
+         "num_layers": 2,
+         "num_heads": 2,
+         "hidden_dim": 128,
+         "intermediate_dim": 512,
+         "dropout": 0.1,
+         "max_sequence_length": 512,
+         "num_segments": 2
+       },
+       "registered_name": "keras_nlp>BertBackbone"
+     },
+     "preprocessor": {
+       "module": "keras_nlp.src.models.bert.bert_preprocessor",
+       "class_name": "BertPreprocessor",
+       "config": {
+         "name": "bert_preprocessor",
+         "trainable": true,
+         "dtype": {
+           "module": "keras",
+           "class_name": "DTypePolicy",
+           "config": {
+             "name": "float32"
+           },
+           "registered_name": null
+         },
+         "tokenizer": {
+           "module": "keras_nlp.src.models.bert.bert_tokenizer",
+           "class_name": "BertTokenizer",
+           "config": {
+             "name": "bert_tokenizer",
+             "trainable": true,
+             "dtype": {
+               "module": "keras",
+               "class_name": "DTypePolicy",
+               "config": {
+                 "name": "int32"
+               },
+               "registered_name": null
+             },
+             "vocabulary": null,
+             "sequence_length": null,
+             "lowercase": true,
+             "strip_accents": false,
+             "split": true,
+             "suffix_indicator": "##",
+             "oov_token": "[UNK]"
+           },
+           "registered_name": "keras_nlp>BertTokenizer"
+         },
+         "sequence_length": 512,
+         "truncate": "round_robin"
+       },
+       "registered_name": "keras_nlp>BertPreprocessor"
+     },
+     "name": "bert_classifier",
+     "num_classes": 2,
+     "activation": "linear",
+     "dropout": 0.1
+   },
+   "registered_name": "keras_nlp>BertClassifier"
+ }
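`task.json` nests the backbone and preprocessor configs inside a two-class `BertClassifier` whose head outputs raw logits (`"activation": "linear"`). A sketch of the equivalent constructor call; the matching trained weights are in `task.weights.h5`, and `from_preset` wires all of this up automatically:

```python
import keras_nlp

backbone = keras_nlp.models.BertBackbone(
    vocabulary_size=30522, num_layers=2, num_heads=2,
    hidden_dim=128, intermediate_dim=512, dropout=0.1,
    max_sequence_length=512, num_segments=2,
)
classifier = keras_nlp.models.BertClassifier(
    backbone=backbone,
    preprocessor=None,    # or the BertPreprocessor sketched above, to accept raw strings
    num_classes=2,
    activation="linear",  # raw logits, as in task.json
    dropout=0.1,
)
```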
task.weights.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d4cdebec41b92b7d63ec411b56d13737d06f43c61d6a9d5f73a3f15a8d324b3e
+ size 52766840
tokenizer.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "module": "keras_nlp.src.models.bert.bert_tokenizer",
+   "class_name": "BertTokenizer",
+   "config": {
+     "name": "bert_tokenizer",
+     "trainable": true,
+     "dtype": {
+       "module": "keras",
+       "class_name": "DTypePolicy",
+       "config": {
+         "name": "int32"
+       },
+       "registered_name": null
+     },
+     "vocabulary": null,
+     "sequence_length": null,
+     "lowercase": true,
+     "strip_accents": false,
+     "split": true,
+     "suffix_indicator": "##",
+     "oov_token": "[UNK]"
+   },
+   "registered_name": "keras_nlp>BertTokenizer"
+ }
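The tokenizer is a lowercasing WordPiece tokenizer that marks sub-word continuations with `##` and maps out-of-vocabulary tokens to `[UNK]`. A short usage sketch, again assuming the repo's vocabulary file:

```python
import keras_nlp

tokenizer = keras_nlp.models.BertTokenizer(
    vocabulary="assets/tokenizer/vocabulary.txt",
    lowercase=True,
)

token_ids = tokenizer("Tokenization example")  # 1-D tensor of WordPiece ids
print(tokenizer.detokenize(token_ids))         # round-trips back to lowercased text
```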