JerMa88 committed
Commit 49fb39f
Parent: 7f9d3d8

roberta-personality-detection-A

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,7 +1,7 @@
 ---
 library_name: peft
 license: mit
-base_model: FacebookAI/roberta-large
+base_model: FacebookAI/xlm-roberta-large
 tags:
 - trl
 - sft
@@ -16,7 +16,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # roberta-personality-detection-A
 
-This model is a fine-tuned version of [FacebookAI/roberta-large](https://huggingface.co/FacebookAI/roberta-large) on an unknown dataset.
+This model is a fine-tuned version of [FacebookAI/xlm-roberta-large](https://huggingface.co/FacebookAI/xlm-roberta-large) on an unknown dataset.
 
 ## Model description
 
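With the card now pointing at FacebookAI/xlm-roberta-large, loading looks roughly like the sketch below. This is assumed usage derived from the config, not something the card spells out, and the repo id JerMa88/roberta-personality-detection-A is inferred from the commit header.

```python
# Minimal loading sketch (assumed usage; repo id inferred from the commit header).
from transformers import AutoModel, AutoTokenizer
from peft import PeftModel

base = AutoModel.from_pretrained("FacebookAI/xlm-roberta-large")
model = PeftModel.from_pretrained(base, "JerMa88/roberta-personality-detection-A")
tokenizer = AutoTokenizer.from_pretrained("JerMa88/roberta-personality-detection-A")
```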
adapter_config.json CHANGED
@@ -1,6 +1,6 @@
 {
   "auto_mapping": null,
-  "base_model_name_or_path": "FacebookAI/roberta-large",
+  "base_model_name_or_path": "FacebookAI/xlm-roberta-large",
   "bias": "none",
   "fan_in_fan_out": false,
   "inference_mode": true,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:240f895fc15f01bf8d58e588758f71978dc86b77a8bf094e0f5a46b7251237cc
+oid sha256:78aabb2340b314098a1d14d36897242c99d9eb03fe68799a466b526f97d0d71f
 size 25179944
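Only the `oid` changed here: this file is a Git LFS pointer, where `oid` is the SHA-256 of the actual weights blob and `size` its byte count (unchanged at 25,179,944 bytes, as expected when only tensor values differ). A small sketch for reading such a pointer, using a hypothetical helper:

```python
# Sketch: parse a Git LFS pointer file (version / oid / size) into a dict.
def parse_lfs_pointer(path: str) -> dict:
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

ptr = parse_lfs_pointer("adapter_model.safetensors")
print(ptr["oid"], ptr["size"])  # sha256:78aa... 25179944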
runs/Oct23_12-24-00_bcm-dgxa100-0005/events.out.tfevents.1729704249.bcm-dgxa100-0005 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:48f0f2388ff124ed2874dbc0af47bccbfddbe88afa6c12758a3baa408b665db2
+size 119624
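The added file is a TensorBoard event log from the training run on bcm-dgxa100-0005. Once the LFS blob is pulled, it can be inspected with TensorBoard's event reader — a sketch, assuming the tensorboard package is installed:

```python
# Sketch: list the scalar tags logged in the new tfevents file.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

acc = EventAccumulator("runs/Oct23_12-24-00_bcm-dgxa100-0005")
acc.Reload()
print(acc.Tags()["scalars"])  # e.g. train/loss, train/learning_rate
```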
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -1,10 +1,9 @@
 {
-  "add_prefix_space": false,
   "added_tokens_decoder": {
     "0": {
       "content": "<s>",
       "lstrip": false,
-      "normalized": true,
+      "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
@@ -12,7 +11,7 @@
     "1": {
       "content": "<pad>",
       "lstrip": false,
-      "normalized": true,
+      "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
@@ -20,7 +19,7 @@
     "2": {
       "content": "</s>",
       "lstrip": false,
-      "normalized": true,
+      "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
@@ -28,12 +27,12 @@
     "3": {
       "content": "<unk>",
       "lstrip": false,
-      "normalized": true,
+      "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
-    "50264": {
+    "250001": {
       "content": "<mask>",
       "lstrip": true,
       "normalized": false,
@@ -41,7 +40,7 @@
       "single_word": false,
       "special": true
     },
-    "50265": {
+    "250002": {
       "content": "[PAD]",
       "lstrip": false,
       "normalized": false,
@@ -54,12 +53,10 @@
   "clean_up_tokenization_spaces": true,
   "cls_token": "<s>",
   "eos_token": "</s>",
-  "errors": "replace",
   "mask_token": "<mask>",
   "model_max_length": 512,
   "pad_token": "</s>",
   "sep_token": "</s>",
-  "tokenizer_class": "RobertaTokenizer",
-  "trim_offsets": true,
+  "tokenizer_class": "XLMRobertaTokenizer",
   "unk_token": "<unk>"
 }
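The id changes fall out of the vocabulary swap: roberta-large has 50,265 tokens with `<mask>` at id 50264, while xlm-roberta-large has 250,002 tokens with `<mask>` at id 250001; the `[PAD]` token added during training then takes the next free id (50265 → 250002). The dropped keys (`add_prefix_space`, `errors`, `trim_offsets`) are byte-level-BPE options that the sentencepiece-based XLMRobertaTokenizer does not use. A quick check against the base tokenizer:

```python
from transformers import AutoTokenizer

# The base XLM-R tokenizer should agree with the updated ids above.
tok = AutoTokenizer.from_pretrained("FacebookAI/xlm-roberta-large")
print(len(tok))           # 250002
print(tok.mask_token_id)  # 250001
```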
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:929a45bca8496234acdca712ee4c284d82aa1404c9917093c4186d2889c02957
+oid sha256:fa3c3dc53c905a734390d0ff4e0929733332ba3865fcd38ab202d84bd49faaa6
 size 5496