Training in progress, step 10

Files changed:
- 25_10_23_config_test_1.csv +1 -1
- 25_10_23_results_real.csv +3 -1
- README.md +13 -0
- adapter_model.bin +1 -1
- adapter_model.safetensors +1 -1
- training_args.bin +1 -1
25_10_23_config_test_1.csv
CHANGED
@@ -2,4 +2,4 @@ run_number,comment,peformed_already,num_train_epochs,max_tokens,temperature,stop
 1,no variations,True,2,100,0.8,False,False,False,False
 2,lemmatization set True,True,2,100,0.8,False,False,False,True
 3,stemming set True,True,2,100,0.8,False,False,True,False
-4,classification_of_valuems set True,
+4,classification_of_valuems set True,True,2,100,0.8,False,True,False,False
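The config rows above pair each run with its training and sampling settings (num_train_epochs, max_tokens, temperature) and a set of boolean preprocessing flags; the header is truncated in this view, so the flag names past `temperature` are not visible. A minimal sketch for reading the file, assuming only the column order shown in the header:

```python
import csv

# Minimal sketch: list runs that have not been performed yet.
# Column meanings are assumed from the (truncated) header row:
# run_number, comment, peformed_already, num_train_epochs, max_tokens, temperature, <flags...>
with open("25_10_23_config_test_1.csv", newline="") as f:
    reader = csv.reader(f)
    next(reader)  # skip the header row
    for row in reader:
        run_number, comment, performed = row[0], row[1], row[2]
        if performed.strip() != "True":
            print(f"run {run_number} ({comment}) is still pending")
```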
25_10_23_results_real.csv
CHANGED
@@ -9,4 +9,6 @@ run_number,items_per_minute,changed_settings,total_time_taken,rouge_scores_unnes
 8,0.0,0,0.0,0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
 9,1527.7772954831448,stemming set True,17.28000545501709,0,0.1983957450904019,0.2519780089622217,0.2098971903962357,0.2402826270196343,0.2928000782881022,0.2490765635230842,0.2850926200202243,0.3364948588993159,0.2903129396144104,0.0723938258535772,0.0800337747692468,0.0729619311369439,0.0946682401043626,0.1041986615172271,0.0940029666330387,0.118932059220299,0.1294571703280245,0.1171713046848759
 10,0.0,0,0.0,0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
-11,
+11,1521.609931137608,classification_of_valuems set True,17.35004448890686,0,0.3097912503061737,0.3117918619129387,0.2985123093297854,0.3692864464982227,0.3657660910086977,0.3510613139478704,0.4322932016041544,0.4194092516901677,0.4034142346670022,0.1853986019402615,0.1767452335316192,0.1744893735961138,0.2321160496220204,0.2189556994085239,0.2145193605708969,0.2770436971412456,0.2594983484442184,0.2537992371891699
+12,0.0,0,0.0,0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
+13,0.0,0,0.0,0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
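Run 11 is the only non-zero result added in this commit; rows 12 and 13 are still placeholders. A hedged sketch for picking out the completed runs, assuming the leading columns follow the truncated header (`run_number,items_per_minute,changed_settings,total_time_taken,...`) and that the score columns start at index 5, as the rows above suggest:

```python
import csv

# Minimal sketch: print throughput and the best trailing score for each completed run.
# The index of the first score column (5) is an assumption based on the rows shown above.
with open("25_10_23_results_real.csv", newline="") as f:
    reader = csv.reader(f)
    next(reader)  # skip the header row
    for row in reader:
        run_number, items_per_minute, changed = row[0], float(row[1]), row[2]
        if items_per_minute > 0:
            scores = [float(x) for x in row[5:]]
            print(run_number, changed, f"{items_per_minute:.1f} items/min", f"max score {max(scores):.4f}")
```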
README.md
CHANGED
@@ -616,6 +616,18 @@ The following `bitsandbytes` quantization config was used during training:
 - bnb_4bit_use_double_quant: True
 - bnb_4bit_compute_dtype: bfloat16
 
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+
 The following `bitsandbytes` quantization config was used during training:
 - quant_method: bitsandbytes
 - load_in_8bit: False
@@ -680,5 +692,6 @@ The following `bitsandbytes` quantization config was used during training:
 - PEFT 0.5.0
 - PEFT 0.5.0
 - PEFT 0.5.0
+- PEFT 0.5.0
 
 - PEFT 0.5.0
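The README hunks only append another copy of the `bitsandbytes` block and another `- PEFT 0.5.0` framework line; the auto-generated model card appears to be appended to on each push rather than rewritten, which is why the diff adds duplicates. As a sketch, the listed values map onto a `transformers` `BitsAndBytesConfig` like this (only the values are taken from the README; the surrounding code is illustrative):

```python
import torch
from transformers import BitsAndBytesConfig

# Sketch of a quantization config matching the values listed in the README block above.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                       # load_in_4bit: True (load_in_8bit: False)
    bnb_4bit_quant_type="nf4",               # bnb_4bit_quant_type: nf4
    bnb_4bit_use_double_quant=True,          # bnb_4bit_use_double_quant: True
    bnb_4bit_compute_dtype=torch.bfloat16,   # bnb_4bit_compute_dtype: bfloat16
    llm_int8_threshold=6.0,                  # llm_int8_threshold: 6.0
    llm_int8_has_fp16_weight=False,          # llm_int8_has_fp16_weight: False
    llm_int8_enable_fp32_cpu_offload=False,  # llm_int8_enable_fp32_cpu_offload: False
)
```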
adapter_model.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:ce1f1a82ee3bdd7ed19236720cbc3352ab1892f399a5ff37bb980570ee1a3178
 size 100733709
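`adapter_model.bin` (and the two files below) are Git LFS pointer files, not the binaries themselves: each pointer records a spec version, the SHA-256 of the stored object, and its size in bytes, and only the `oid` line changes when new weights are pushed. A small sketch of reading such a pointer:

```python
# Minimal sketch: parse a Git LFS pointer file into its key/value fields.
def read_lfs_pointer(path):
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

pointer = read_lfs_pointer("adapter_model.bin")
print(pointer["version"], pointer["oid"], pointer["size"])
```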
adapter_model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:5a3480b947478c17d5df359b02a08abb839a59e985f611c4c15b2f0b3a0cb324
 size 100690288
training_args.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:91c44e8dc8169fffdaa1ca9accb53d71deab0240c8b8ce38c8f9a2dcce55449d
 size 4283
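Taken together, the commit updates a PEFT 0.5.0 adapter checkpoint (`adapter_model.bin` / `adapter_model.safetensors`) plus the serialized `training_args.bin`. A hedged sketch of loading the adapter onto its base model, assuming the repository also contains the usual `adapter_config.json`; the base-model id is a placeholder, not something recorded in this diff:

```python
from transformers import AutoModelForCausalLM
from peft import PeftModel

# Sketch only: "base-model-id" is a placeholder for whatever base checkpoint the
# adapter was trained on; "." is a local checkout of this repository.
base = AutoModelForCausalLM.from_pretrained("base-model-id")
model = PeftModel.from_pretrained(base, ".")
model.eval()
```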