ddobokki committed on
Commit
4a539c1
1 Parent(s): 5277057

train on STS and NLI datasets (KLUE, KAKAO)
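The commit updates a sentence-transformers checkpoint trained on Korean STS and NLI data. A minimal usage sketch, assuming the model is published on the Hub under a repo id like ddobokki/klue-roberta-small-nli-sts (hypothetical; this commit page does not show the repo id):

```python
# Minimal sketch: embed sentences with the trained checkpoint and score
# their similarity, as in STS-style evaluation.
# NOTE: the repo id is an assumption; this commit page does not show it.
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer("ddobokki/klue-roberta-small-nli-sts")  # hypothetical id

sentences = ["오늘 날씨가 맑다.", "오늘은 하늘이 화창하다."]
embeddings = model.encode(sentences, convert_to_tensor=True)

print(util.cos_sim(embeddings[0], embeddings[1]).item())
```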

Files changed (4)
  1. .gitattributes +1 -0
  2. config.json +1 -1
  3. pytorch_model.bin +1 -1
  4. tokenizer_config.json +1 -1
.gitattributes CHANGED
@@ -26,3 +26,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zstandard filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
 pytorch_model.bin filter=lfs diff=lfs merge=lfs -text
+.git/lfs/objects/e0/77/e07700d437b808a7a8c43550a67f06949fa1c0663f0bf68d6e194d47663c41ca filter=lfs diff=lfs merge=lfs -text
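These .gitattributes rules route matching paths through Git LFS, so the repository itself stores small pointer files while the Hub serves the real binaries; downloading through huggingface_hub resolves the pointer transparently. A minimal sketch, again with a hypothetical repo id:

```python
# Minimal sketch: fetch the LFS-backed weights; the returned path is the
# resolved binary, not the pointer file shown under pytorch_model.bin below.
# NOTE: the repo id is an assumption; this commit page does not show it.
from huggingface_hub import hf_hub_download

local_path = hf_hub_download(
    repo_id="ddobokki/klue-roberta-small-nli-sts",  # hypothetical id
    filename="pytorch_model.bin",
)
print(local_path)
```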
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "/content/drive/MyDrive/sentence_transformers/model1/",
+  "_name_or_path": "/content/drive/MyDrive/sentence_transformers/model2/",
   "architectures": [
     "RobertaModel"
   ],
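The only change in config.json is _name_or_path, which is provenance metadata: transformers records the path or repo id a checkpoint was loaded from and refreshes it on each from_pretrained call, so the Colab Drive path here is harmless. A small sketch illustrating this with the base klue/roberta-small model that the tokenizer config below references:

```python
# Minimal sketch: _name_or_path reflects whatever from_pretrained was given,
# so loading from the Hub replaces the local Drive path seen in the diff.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("klue/roberta-small")  # base model, for illustration
print(config._name_or_path)  # "klue/roberta-small"
print(config.model_type)     # "roberta"
```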
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e07700d437b808a7a8c43550a67f06949fa1c0663f0bf68d6e194d47663c41ca
+oid sha256:a22f800a64ef1368330beb74e6bc69c13c52dc048ab9758f53d6905e47fa5a0f
 size 272407281
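Both versions are valid git-lfs v1 pointers (version, oid, size); only the sha256 oid changed while the byte size stayed at 272407281, i.e. the retrained weights replaced the old ones one-for-one. A minimal sketch of verifying a downloaded binary against the new pointer:

```python
# Minimal sketch: verify a local weights file against its git-lfs v1 pointer.
import hashlib

POINTER = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:a22f800a64ef1368330beb74e6bc69c13c52dc048ab9758f53d6905e47fa5a0f\n"
    "size 272407281\n"
)

def verify(weights_path: str, pointer_text: str) -> bool:
    """Check the file's sha256 digest and byte size against the pointer."""
    fields = dict(line.split(" ", 1) for line in pointer_text.strip().splitlines())
    digest = hashlib.sha256()
    size = 0
    with open(weights_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            size += len(chunk)
    oid = fields["oid"].split(":", 1)[1]  # strip the "sha256:" prefix
    return digest.hexdigest() == oid and size == int(fields["size"])

# Usage, with a hypothetical local path:
# print(verify("pytorch_model.bin", POINTER))
```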
tokenizer_config.json CHANGED
@@ -1 +1 @@
-{"do_lower_case": false, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "do_basic_tokenize": true, "never_split": null, "bos_token": "[CLS]", "eos_token": "[SEP]", "model_max_length": 512, "special_tokens_map_file": "/root/.cache/torch/sentence_transformers/klue_roberta-small/special_tokens_map.json", "name_or_path": "/content/drive/MyDrive/sentence_transformers/model1/", "tokenizer_class": "BertTokenizer"}
+{"do_lower_case": false, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "do_basic_tokenize": true, "never_split": null, "bos_token": "[CLS]", "eos_token": "[SEP]", "model_max_length": 512, "special_tokens_map_file": "/root/.cache/torch/sentence_transformers/klue_roberta-small/special_tokens_map.json", "name_or_path": "/content/drive/MyDrive/sentence_transformers/model2/", "tokenizer_class": "BertTokenizer"}