ajrayman committed on
Commit dc88d5c
1 Parent(s): 1aba8c5

Training in progress, epoch 1

Files changed (4)
  1. README.md +14 -13
  2. config.json +6 -6
  3. model.safetensors +2 -2
  4. training_args.bin +2 -2
README.md CHANGED
@@ -1,24 +1,25 @@
  ---
+ library_name: transformers
  license: mit
- base_model: roberta-base
+ base_model: roberta-large
  tags:
  - generated_from_trainer
  model-index:
- - name: Artistic_Interests_continuous
+ - name: Imagination_continuous
    results: []
  ---

  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
  should probably proofread and complete it, then remove this comment. -->

- # Artistic_Interests_continuous
+ # Imagination_continuous

- This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the None dataset.
+ This model is a fine-tuned version of [roberta-large](https://huggingface.co/roberta-large) on the None dataset.
  It achieves the following results on the evaluation set:
- - Loss: 0.0487
- - Rmse: 0.2207
- - Mae: 0.1806
- - Corr: 0.3002
+ - Loss: 0.0528
+ - Rmse: 0.2298
+ - Mae: 0.1814
+ - Corr: 0.2422

  ## Model description

@@ -49,13 +50,13 @@ The following hyperparameters were used during training:

  | Training Loss | Epoch | Step | Validation Loss | Rmse | Mae | Corr |
  |:-------------:|:-----:|:----:|:---------------:|:------:|:------:|:------:|
- | No log | 1.0 | 268 | 0.0499 | 0.2234 | 0.1834 | 0.2657 |
- | 0.0616 | 2.0 | 536 | 0.0487 | 0.2207 | 0.1806 | 0.3002 |
+ | No log | 1.0 | 268 | 0.0512 | 0.2262 | 0.1848 | 0.2249 |
+ | 0.0724 | 2.0 | 536 | 0.0528 | 0.2298 | 0.1814 | 0.2422 |


  ### Framework versions

- - Transformers 4.43.3
- - Pytorch 2.4.0
- - Datasets 2.20.0
+ - Transformers 4.44.1
+ - Pytorch 1.11.0
+ - Datasets 2.12.0
  - Tokenizers 0.19.1
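The updated card describes a single-output regression head on top of roberta-large. A minimal inference sketch for such a checkpoint is below; the repo id is an assumption (the actual Hub id is not stated in this commit), so substitute the real one.

```python
# Sketch: score a sentence with a single-label regression model fine-tuned from roberta-large.
# The repo id below is hypothetical and not confirmed by this commit.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

model_id = "ajrayman/Imagination_continuous"  # hypothetical repo id
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id)
model.eval()

inputs = tokenizer(
    "I spend a lot of time daydreaming about new ideas.",
    return_tensors="pt",
    truncation=True,
)
with torch.no_grad():
    # problem_type "regression" with one label -> logits of shape (batch, 1)
    score = model(**inputs).logits.squeeze(-1).item()
print(f"predicted continuous score: {score:.3f}")
```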
config.json CHANGED
@@ -1,5 +1,5 @@
  {
-   "_name_or_path": "roberta-base",
+   "_name_or_path": "roberta-large",
    "architectures": [
      "RobertaForSequenceClassification"
    ],
@@ -9,25 +9,25 @@
    "eos_token_id": 2,
    "hidden_act": "gelu",
    "hidden_dropout_prob": 0.1,
-   "hidden_size": 768,
+   "hidden_size": 1024,
    "id2label": {
      "0": "LABEL_0"
    },
    "initializer_range": 0.02,
-   "intermediate_size": 3072,
+   "intermediate_size": 4096,
    "label2id": {
      "LABEL_0": 0
    },
    "layer_norm_eps": 1e-05,
    "max_position_embeddings": 514,
    "model_type": "roberta",
-   "num_attention_heads": 12,
-   "num_hidden_layers": 12,
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
    "pad_token_id": 1,
    "position_embedding_type": "absolute",
    "problem_type": "regression",
    "torch_dtype": "float32",
-   "transformers_version": "4.43.3",
+   "transformers_version": "4.44.1",
    "type_vocab_size": 1,
    "use_cache": true,
    "vocab_size": 50265
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:4ecf57b29b5680bd6b63a47a8cd3c43dbed16130d5b5ed5bddb17879f1420d54
- size 498609748
+ oid sha256:2f782c95f766d8b08897bbaa36b1c8c11315a6a2eef333c9f2f3de7a68d292f9
+ size 1421491316
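The size jump is consistent with the base-model switch: roberta-large has roughly 355 M float32 parameters, and 355e6 × 4 bytes ≈ 1.42 GB, close to the new 1421491316-byte pointer, while the old 498609748-byte file similarly matches roberta-base's roughly 125 M parameters.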
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b049182915c07632eac6ac844cc6590594d7f871da5a98769e845d5db8ad9fc5
- size 5304
+ oid sha256:85c39e1623103a47b8beba7b65f850d7d8f267ce530319d0be810558a0b08586
+ size 4783