BioMike committed
Commit dda1f93
1 Parent(s): 550c8d3

Upload folder using huggingface_hub

added_tokens.json ADDED
@@ -0,0 +1,3 @@
+ {
+   "[MASK]": 128000
+ }
gliner_config.json ADDED
@@ -0,0 +1,216 @@
+ {
+   "class_token_index": -1,
+   "dropout": 0.3,
+   "embed_ent_token": true,
+   "encoder_config": {
+     "_name_or_path": "microsoft/deberta-v3-base",
+     "add_cross_attention": false,
+     "architectures": null,
+     "attention_probs_dropout_prob": 0.1,
+     "bad_words_ids": null,
+     "begin_suppress_tokens": null,
+     "bos_token_id": null,
+     "chunk_size_feed_forward": 0,
+     "cross_attention_hidden_size": null,
+     "decoder_start_token_id": null,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "early_stopping": false,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": null,
+     "exponential_decay_length_penalty": null,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "hidden_act": "gelu",
+     "hidden_dropout_prob": 0.1,
+     "hidden_size": 768,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "initializer_range": 0.02,
+     "intermediate_size": 3072,
+     "is_decoder": false,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "layer_norm_eps": 1e-07,
+     "length_penalty": 1.0,
+     "max_length": 20,
+     "max_position_embeddings": 512,
+     "max_relative_positions": -1,
+     "min_length": 0,
+     "model_type": "deberta-v2",
+     "no_repeat_ngram_size": 0,
+     "norm_rel_ebd": "layer_norm",
+     "num_attention_heads": 12,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_hidden_layers": 12,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": 0,
+     "pooler_dropout": 0,
+     "pooler_hidden_act": "gelu",
+     "pooler_hidden_size": 768,
+     "pos_att_type": [
+       "p2c",
+       "c2p"
+     ],
+     "position_biased_input": false,
+     "position_buckets": 256,
+     "prefix": null,
+     "problem_type": null,
+     "pruned_heads": {},
+     "relative_attention": true,
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "sep_token_id": null,
+     "share_att_key": true,
+     "suppress_tokens": null,
+     "task_specific_params": null,
+     "temperature": 1.0,
+     "tf_legacy_loss": false,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": true,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": null,
+     "torchscript": false,
+     "type_vocab_size": 0,
+     "typical_p": 1.0,
+     "use_bfloat16": false,
+     "vocab_size": 128100
+   },
+   "ent_token": "<<ENT>>",
+   "eval_every": 5000,
+   "fine_tune": true,
+   "freeze_token_rep": false,
+   "fuse_layers": false,
+   "has_rnn": true,
+   "hidden_size": 768,
+   "label_smoothing": 0,
+   "labels_encoder": "BAAI/bge-small-en-v1.5",
+   "labels_encoder_config": {
+     "_name_or_path": "BAAI/bge-small-en-v1.5",
+     "add_cross_attention": false,
+     "architectures": [
+       "BertModel"
+     ],
+     "attention_probs_dropout_prob": 0.1,
+     "bad_words_ids": null,
+     "begin_suppress_tokens": null,
+     "bos_token_id": null,
+     "chunk_size_feed_forward": 0,
+     "classifier_dropout": null,
+     "cross_attention_hidden_size": null,
+     "decoder_start_token_id": null,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "early_stopping": false,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": null,
+     "exponential_decay_length_penalty": null,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "hidden_act": "gelu",
+     "hidden_dropout_prob": 0.1,
+     "hidden_size": 384,
+     "id2label": {
+       "0": "LABEL_0"
+     },
+     "initializer_range": 0.02,
+     "intermediate_size": 1536,
+     "is_decoder": false,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0
+     },
+     "layer_norm_eps": 1e-12,
+     "length_penalty": 1.0,
+     "max_length": 20,
+     "max_position_embeddings": 512,
+     "min_length": 0,
+     "model_type": "bert",
+     "no_repeat_ngram_size": 0,
+     "num_attention_heads": 12,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_hidden_layers": 12,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": 0,
+     "position_embedding_type": "absolute",
+     "prefix": null,
+     "problem_type": null,
+     "pruned_heads": {},
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "sep_token_id": null,
+     "suppress_tokens": null,
+     "task_specific_params": null,
+     "temperature": 1.0,
+     "tf_legacy_loss": false,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": true,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": "float32",
+     "torchscript": false,
+     "type_vocab_size": 2,
+     "typical_p": 1.0,
+     "use_bfloat16": false,
+     "use_cache": true,
+     "vocab_size": 30522
+   },
+   "log_dir": "deberta/",
+   "loss_alpha": 0.8,
+   "loss_gamma": 2,
+   "loss_reduction": "sum",
+   "lr_encoder": "1e-5",
+   "lr_others": "5e-5",
+   "max_grad_norm": 10.0,
+   "max_len": 512,
+   "max_neg_type_ratio": 1,
+   "max_types": 100,
+   "max_width": 12,
+   "model_name": "microsoft/deberta-v3-base",
+   "model_type": "gliner",
+   "name": "span level gliner",
+   "num_post_fusion_layers": 1,
+   "num_steps": 100000,
+   "post_fusion_schema": "",
+   "prev_path": null,
+   "random_drop": true,
+   "root_dir": "gliner_logs",
+   "save_total_limit": 3,
+   "scheduler_type": "cosine",
+   "sep_token": "<<SEP>>",
+   "shuffle_types": true,
+   "size_sup": -1,
+   "span_mode": "markerV0",
+   "subtoken_pooling": "first",
+   "train_batch_size": 8,
+   "train_data": "data/nuner_train.json",
+   "transformers_version": "4.45.2",
+   "val_data_dir": "none",
+   "vocab_size": -1,
+   "warmup_ratio": 0.05,
+   "weight_decay_encoder": 0.1,
+   "weight_decay_other": 0.1,
+   "words_splitter_type": "whitespace"
+ }
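Note: the configuration above describes a bi-encoder GLiNER model (a microsoft/deberta-v3-base token encoder plus a BAAI/bge-small-en-v1.5 labels encoder, span mode markerV0, max_width 12). The following is a minimal usage sketch with the gliner Python package, not part of this commit; "path/to/this/repo" is a placeholder for the actual repo id or local folder, and the threshold value is illustrative.

# Minimal sketch: load the checkpoint from this commit with the `gliner` package.
from gliner import GLiNER

model = GLiNER.from_pretrained("path/to/this/repo")  # placeholder path

text = "Bill Gates co-founded Microsoft in Albuquerque in 1975."
labels = ["person", "organization", "location", "date"]

# Spans of up to max_width (12) words are scored against label embeddings
# produced by the bge-small-en-v1.5 labels encoder declared in gliner_config.json.
entities = model.predict_entities(text, labels, threshold=0.5)
for ent in entities:
    print(ent["start"], ent["end"], ent["text"], "=>", ent["label"])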
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d24a13efabd314887b38c5a966633c7d895a92e22b1ee52b4c46fd1570cc23e9
+ size 1909053417
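Note: optimizer.pt and the other large binaries in this commit are stored as Git LFS pointers, so the three lines above are the pointer file itself, not the tensor data; `git lfs pull` (or the huggingface_hub download utilities) fetches the real blob. A small sketch for checking a downloaded blob against such a pointer; the helper names and the pointer file path are hypothetical, not part of the repo.

# Parse a Git LFS pointer file and verify a downloaded blob against its sha256 and size.
import hashlib
from pathlib import Path

def parse_lfs_pointer(pointer_path):
    # Pointer files hold "key value" lines: version, oid sha256:<hex>, size <bytes>.
    fields = {}
    for line in Path(pointer_path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return {"oid": fields["oid"].split(":", 1)[1], "size": int(fields["size"])}

def verify_blob(blob_path, pointer):
    data = Path(blob_path).read_bytes()
    return len(data) == pointer["size"] and hashlib.sha256(data).hexdigest() == pointer["oid"]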
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6d212230a8a2ae24150c8cc4424922c984b809932cce51cfac405b52ffa4e029
+ size 969281034
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c8fdef4d377b5858ebcbd8b06cdffe099995228c90ad6e65d556b6b0534cb434
+ size 14244
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:53ecce1cf44fdef494e7d73e802dda7f67b84fe37ecc207ba134f042b42b58f7
+ size 1064
special_tokens_map.json ADDED
@@ -0,0 +1,51 @@
+ {
+   "bos_token": {
+     "content": "[CLS]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "cls_token": {
+     "content": "[CLS]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "[SEP]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "mask_token": {
+     "content": "[MASK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "[PAD]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "sep_token": {
+     "content": "[SEP]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "[UNK]",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
spm.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c679fbf93643d19aab7ee10c0b99e460bdbc02fedf34b92b05af343b4af586fd
+ size 2464616
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,58 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "128000": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "[CLS]",
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_lower_case": false,
+   "eos_token": "[SEP]",
+   "mask_token": "[MASK]",
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "sp_model_kwargs": {},
+   "split_by_punct": false,
+   "tokenizer_class": "DebertaV2Tokenizer",
+   "unk_token": "[UNK]",
+   "vocab_type": "spm"
+ }
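Note: tokenizer_config.json declares a DebertaV2Tokenizer backed by the SentencePiece model in spm.model, with [MASK] registered as added token id 128000 (matching added_tokens.json). A minimal loading sketch with transformers, assuming the standard file layout; the local path is a placeholder.

# Minimal sketch: load the tokenizer files from this commit with transformers.
# sentencepiece must be installed for DebertaV2Tokenizer to load spm.model.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/this/repo")  # placeholder path

enc = tok("GLiNER extracts entities from text.")
print(enc["input_ids"])
print(tok.convert_ids_to_tokens(enc["input_ids"]))
print(tok.mask_token, tok.mask_token_id)  # expected: [MASK] 128000, per added_tokens.json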
trainer_state.json ADDED
The diff for this file is too large to render. See raw diff