jhsmith committed
Commit 020beb1
Parent: 5ccdc01

Upload folder using huggingface_hub
1_Pooling/config.json ADDED
@@ -0,0 +1,7 @@
{
  "word_embedding_dimension": 768,
  "pooling_mode_cls_token": false,
  "pooling_mode_mean_tokens": true,
  "pooling_mode_max_tokens": false,
  "pooling_mode_mean_sqrt_len_tokens": false
}
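
Per this config, pooling averages the token embeddings (`pooling_mode_mean_tokens: true`) rather than taking the CLS token. A minimal sketch of that computation, assuming PyTorch tensors and an attention mask; the function name is illustrative:

```python
import torch

def mean_pool(token_embeddings: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
    """Average token embeddings over the sequence, ignoring padding positions."""
    # (batch, seq_len) -> (batch, seq_len, 1) so the mask broadcasts over hidden dims
    mask = attention_mask.unsqueeze(-1).float()
    summed = (token_embeddings * mask).sum(dim=1)   # (batch, hidden)
    counts = mask.sum(dim=1).clamp(min=1e-9)        # avoid division by zero
    return summed / counts                          # (batch, 768) for this model
```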
README.md ADDED
@@ -0,0 +1,88 @@
---
pipeline_tag: sentence-similarity
tags:
- sentence-transformers
- feature-extraction
- sentence-similarity
---

# {MODEL_NAME}

This is a [sentence-transformers](https://www.SBERT.net) model: it maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for tasks like clustering or semantic search.

<!--- Describe your model here -->

## Usage (Sentence-Transformers)

Using this model is easy once you have [sentence-transformers](https://www.SBERT.net) installed:

```
pip install -U sentence-transformers
```

Then you can use the model like this:

```python
from sentence_transformers import SentenceTransformer
sentences = ["This is an example sentence", "Each sentence is converted"]

model = SentenceTransformer('{MODEL_NAME}')
embeddings = model.encode(sentences)
print(embeddings)
```
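
Since the architecture ends in a `Normalize()` module and training used a cosine objective (see below), scoring sentence pairs by cosine similarity is the natural follow-up. A minimal sketch using `sentence_transformers.util`, with the same model-name placeholder as above:

```python
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer('{MODEL_NAME}')
emb = model.encode(["This is an example sentence", "Each sentence is converted"],
                   convert_to_tensor=True)
# Cosine similarity between the two sentence embeddings (scalar in [-1, 1])
print(util.cos_sim(emb[0], emb[1]).item())
```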

## Evaluation Results

<!--- Describe how your model was evaluated -->

For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME})

## Training
The model was trained with the following parameters:

**DataLoader**:

`torch.utils.data.dataloader.DataLoader` of length 3057 with parameters:
```
{'batch_size': 10, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'}
```

**Loss**:

`sentence_transformers.losses.CosineSimilarityLoss.CosineSimilarityLoss`

Parameters of the `fit()` method:
```
{
    "epochs": 1,
    "evaluation_steps": 400,
    "evaluator": "sentence_transformers.evaluation.BinaryClassificationEvaluator.BinaryClassificationEvaluator",
    "max_grad_norm": 1,
    "optimizer_class": "<class 'torch.optim.adamw.AdamW'>",
    "optimizer_params": {
        "lr": 0.0001
    },
    "scheduler": "warmuplinear",
    "steps_per_epoch": null,
    "warmup_steps": 305,
    "weight_decay": 0.01
}
```
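
As a rough sketch, these parameters correspond to a `fit()` call along the following lines. The training pairs shown are hypothetical placeholders (the actual dataset behind the 3057-batch DataLoader is not part of this upload), and the logged `BinaryClassificationEvaluator` is omitted for brevity:

```python
from torch.utils.data import DataLoader
from sentence_transformers import SentenceTransformer, InputExample, losses

model = SentenceTransformer('{MODEL_NAME}')

# Hypothetical examples; real sentence pairs with similarity labels would go here.
train_examples = [InputExample(texts=["A sentence", "A similar sentence"], label=1.0)]
train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=10)
train_loss = losses.CosineSimilarityLoss(model)

model.fit(
    train_objectives=[(train_dataloader, train_loss)],
    epochs=1,
    scheduler="warmuplinear",
    warmup_steps=305,
    optimizer_params={"lr": 1e-4},
    weight_decay=0.01,
    max_grad_norm=1,
)
```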

## Full Model Architecture
```
SentenceTransformer(
  (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: MPNetModel
  (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
  (2): Normalize()
)
```

## Citing & Authors

<!--- Describe where people can find more information -->
config.json ADDED
@@ -0,0 +1,24 @@
{
  "_name_or_path": "/root/.cache/torch/sentence_transformers/jhsmith_finetuning_mixed/",
  "architectures": [
    "MPNetModel"
  ],
  "attention_probs_dropout_prob": 0.1,
  "bos_token_id": 0,
  "eos_token_id": 2,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "layer_norm_eps": 1e-05,
  "max_position_embeddings": 514,
  "model_type": "mpnet",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "pad_token_id": 1,
  "relative_attention_num_buckets": 32,
  "torch_dtype": "float32",
  "transformers_version": "4.35.2",
  "vocab_size": 30527
}
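
The underlying encoder is a stock MPNet, so the checkpoint also loads with plain `transformers`. A minimal sketch, assuming a local clone of this repository (the path is illustrative):

```python
from transformers import AutoConfig, AutoModel

config = AutoConfig.from_pretrained("./finetuning_mixed")  # illustrative local path
assert config.model_type == "mpnet" and config.hidden_size == 768
encoder = AutoModel.from_pretrained("./finetuning_mixed", config=config)  # reads model.safetensors
```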
config_sentence_transformers.json ADDED
@@ -0,0 +1,7 @@
{
  "__version__": {
    "sentence_transformers": "2.0.0",
    "transformers": "4.6.1",
    "pytorch": "1.8.1"
  }
}
eval/binary_classification_evaluation_results_results.csv ADDED
@@ -0,0 +1,9 @@
epoch,steps,cossim_accuracy,cossim_accuracy_threshold,cossim_f1,cossim_precision,cossim_recall,cossim_f1_threshold,cossim_ap,manhattan_accuracy,manhattan_accuracy_threshold,manhattan_f1,manhattan_precision,manhattan_recall,manhattan_f1_threshold,manhattan_ap,euclidean_accuracy,euclidean_accuracy_threshold,euclidean_f1,euclidean_precision,euclidean_recall,euclidean_f1_threshold,euclidean_ap,dot_accuracy,dot_accuracy_threshold,dot_f1,dot_precision,dot_recall,dot_f1_threshold,dot_ap
0,400,0.8557986536107711,0.712531328201294,0.28438877290651826,0.27184035476718404,0.29815175097276264,0.3898671567440033,0.2867727777215104,0.8556456548347613,16.476024627685547,0.29102627664848785,0.2967644084934277,0.28550583657587547,24.510189056396484,0.2932845253648341,0.8557986536107711,0.7582442164421082,0.28438877290651826,0.27184035476718404,0.29815175097276264,1.1046563386917114,0.2868010627035612,0.8557986536107711,0.7125314474105835,0.28438877290651826,0.27184035476718404,0.29815175097276264,0.3898671567440033,0.28692984251148124
0,800,0.8428702570379437,1.0,0.2716996099689297,0.15721826945145742,0.9995136186770428,0.35268521308898926,0.1500019182022798,0.842640758873929,0.00012805877486243844,0.2716996099689297,0.15721826945145742,0.9995136186770428,24.64952278137207,0.15078744017486995,0.842640758873929,5.8464715948503e-06,0.2716996099689297,0.15721826945145742,0.9995136186770428,1.1378178596496582,0.15035849029339948,0.8428702570379437,1.0000001192092896,0.2716996099689297,0.15721826945145742,0.9995136186770428,0.35268521308898926,0.15038981915918756
0,1200,0.8429467564259486,1.0,0.2722436494214656,0.15767986442766907,0.9956225680933852,0.346087247133255,0.1897098741826344,0.842640758873929,0.00010429143003420904,0.2736872146118722,0.16036789297658863,0.9328793774319066,24.714378356933594,0.1896405334374167,0.842640758873929,4.751912456413265e-06,0.2722436494214656,0.15767986442766907,0.9956225680933852,1.143601894378662,0.18968782777027082,0.8427172582619339,1.0,0.2722436494214656,0.15767986442766907,0.9956225680933852,0.3460873067378998,0.1897612357006916
0,1600,0.8429467564259486,1.0,0.2719153159113463,0.15736273834137376,0.9995136186770428,0.328864723443985,0.14089144735150053,0.8427172582619339,0.0011735885636880994,0.27188574451203384,0.1573308846036119,1.0,25.10045051574707,0.1429735777222008,0.8427172582619339,5.310723645379767e-05,0.2719153159113463,0.15736273834137376,0.9995136186770428,1.1585640907287598,0.14109712751872056,0.8428702570379437,1.0,0.27188574451203384,0.1573308846036119,1.0,0.3288617730140686,0.1408232471223858
0,2000,0.8428702570379437,1.0,0.2720656345110493,0.157451370807168,1.0,0.391846239566803,0.14757150022184637,0.8427937576499388,0.00017429243598598987,0.2721556688066715,0.15751168313797595,1.0,23.77460479736328,0.15719735243666408,0.8427937576499388,7.854174327803776e-06,0.27213765718067506,0.15749961697563966,1.0,1.1028631925582886,0.14908931786498772,0.842640758873929,1.0000001192092896,0.27204763479986765,0.15743931388314572,1.0,0.39184626936912537,0.14718653051153777
0,2400,0.8429467564259486,1.0,0.2718497950548724,0.15730680948737566,1.0,0.3650091290473938,0.1338660087386431,0.8428702570379437,0.00013009202666580677,0.27175350436392487,0.15725436179981633,0.9995136186770428,24.400989532470703,0.13664960875036225,0.8428702570379437,5.82129905524198e-06,0.2718497950548724,0.15730680948737566,1.0,1.1269347667694092,0.13539644330182293,0.8429467564259486,1.0,0.27173553719008264,0.15724232917591247,0.9995136186770428,0.36500951647758484,0.13407733659470913
0,2800,0.8429467564259486,1.0,0.2716996099689297,0.15721826945145742,0.9995136186770428,0.3733765482902527,0.1364629821017202,0.8428702570379437,9.338024392491207e-05,0.2716996099689297,0.15721826945145742,0.9995136186770428,24.13287353515625,0.138090026007544,0.8428702570379437,4.227437784720678e-06,0.2716996099689297,0.15721826945145742,0.9995136186770428,1.1194851398468018,0.1372987070184668,0.8429467564259486,1.0,0.2716996099689297,0.15721826945145742,0.9995136186770428,0.3733765482902527,0.13634804704446896
0,-1,0.8429467564259486,1.0,0.2716996099689297,0.15721826945145742,0.9995136186770428,0.37961846590042114,0.13624718847690453,0.8428702570379437,9.488363866694272e-05,0.2716996099689297,0.15721826945145742,0.9995136186770428,23.998687744140625,0.13798427053244927,0.8428702570379437,4.283218004275113e-06,0.2716996099689297,0.15721826945145742,0.9995136186770428,1.1138954162597656,0.13711481852996588,0.8427172582619339,1.0,0.2716996099689297,0.15721826945145742,0.9995136186770428,0.3796186149120331,0.13622244083367108
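
One row is appended per evaluation: every 400 steps during the epoch, plus a final row with `steps = -1` at the end of the epoch. Columns cover accuracy, F1, precision, recall, the decision thresholds, and average precision for cosine, Manhattan, Euclidean, and dot-product scoring. A small sketch for pulling out the final cosine metrics, assuming pandas is available:

```python
import pandas as pd

df = pd.read_csv("eval/binary_classification_evaluation_results_results.csv")
final = df[df["steps"] == -1].iloc[0]  # end-of-epoch evaluation row
print(f"cosine accuracy={final['cossim_accuracy']:.4f}, AP={final['cossim_ap']:.4f}")
```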
model.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b0e829186494b520b01029492e797220e562b82ec9838a666d74ca103326c31a
size 437967672
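
This is a Git LFS pointer rather than the weights themselves; once `git lfs pull` resolves it, the ~438 MB file can be inspected directly. A sketch assuming the `safetensors` package:

```python
from safetensors.torch import load_file

state_dict = load_file("model.safetensors")  # the real file, after git-lfs has fetched it
total = sum(t.numel() for t in state_dict.values())
print(f"{len(state_dict)} tensors, {total:,} parameters")
```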
modules.json ADDED
@@ -0,0 +1,20 @@
[
  {
    "idx": 0,
    "name": "0",
    "path": "",
    "type": "sentence_transformers.models.Transformer"
  },
  {
    "idx": 1,
    "name": "1",
    "path": "1_Pooling",
    "type": "sentence_transformers.models.Pooling"
  },
  {
    "idx": 2,
    "name": "2",
    "path": "2_Normalize",
    "type": "sentence_transformers.models.Normalize"
  }
]
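
`modules.json` tells sentence-transformers which modules to chain, in order, at load time. The same three-stage pipeline can be assembled by hand; a sketch, with the checkpoint path as a placeholder:

```python
from sentence_transformers import SentenceTransformer, models

embedding = models.Transformer("{MODEL_NAME}", max_seq_length=512)   # module 0
pooling = models.Pooling(embedding.get_word_embedding_dimension(),   # module 1 (mean pooling)
                         pooling_mode_mean_tokens=True)
model = SentenceTransformer(modules=[embedding, pooling, models.Normalize()])  # module 2
```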
sentence_bert_config.json ADDED
@@ -0,0 +1,4 @@
{
  "max_seq_length": 512,
  "do_lower_case": false
}
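
`max_seq_length` caps inputs at 512 tokens; longer texts are truncated. The value can be inspected or lowered at runtime:

```python
from sentence_transformers import SentenceTransformer

model = SentenceTransformer('{MODEL_NAME}')
print(model.max_seq_length)  # 512, read from sentence_bert_config.json
model.max_seq_length = 256   # optionally truncate earlier to speed up encoding
```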
special_tokens_map.json ADDED
@@ -0,0 +1,51 @@
{
  "bos_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "cls_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "mask_token": {
    "content": "<mask>",
    "lstrip": true,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<pad>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "sep_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "[UNK]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,72 @@
{
  "added_tokens_decoder": {
    "0": {
      "content": "<s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<pad>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "</s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "3": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "104": {
      "content": "[UNK]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "30526": {
      "content": "<mask>",
      "lstrip": true,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<s>",
  "clean_up_tokenization_spaces": true,
  "cls_token": "<s>",
  "do_lower_case": true,
  "eos_token": "</s>",
  "mask_token": "<mask>",
  "max_length": 250,
  "model_max_length": 512,
  "pad_to_multiple_of": null,
  "pad_token": "<pad>",
  "pad_token_type_id": 0,
  "padding_side": "right",
  "sep_token": "</s>",
  "stride": 0,
  "strip_accents": null,
  "tokenize_chinese_chars": true,
  "tokenizer_class": "MPNetTokenizer",
  "truncation_side": "right",
  "truncation_strategy": "longest_first",
  "unk_token": "[UNK]"
}
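
The tokenizer follows the MPNet convention: RoBERTa-style boundary tokens (`<s>`, `</s>`, `<pad>`, `<mask>`) combined with a BERT-style `[UNK]` and lowercasing. A quick check with `transformers`, using the usual model-name placeholder:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("{MODEL_NAME}")
print(tok.cls_token, tok.sep_token, tok.pad_token, tok.unk_token)  # <s> </s> <pad> [UNK]
ids = tok("This is an example sentence").input_ids
print(ids[0], ids[-1])  # 0 and 2: the <s> / </s> ids from config.json
```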
vocab.txt ADDED
The diff for this file is too large to render. See raw diff