Training in progress, step 10
- 26_10_23_config_test.csv +2 -0
- 26_10_23_config_test_3.csv +1 -1
- 26_10_23_results_real_3.csv +1 -1
- 26_10_23_results_test.csv +2 -0
- README.md +13 -0
- adapter_model.bin +1 -1
- adapter_model.safetensors +1 -1
- proc_dataset.csv +0 -0
- training_args.bin +1 -1
26_10_23_config_test.csv
ADDED
@@ -0,0 +1,2 @@
+run_number,comment,peformed_already,num_train_epochs,max_tokens,temperature,stop_token,classification_of_valuems,stemming,lemmatization
+1,no variations,False,2,100,0.8,False,False,False,False
26_10_23_config_test_3.csv
CHANGED
@@ -5,4 +5,4 @@ run_number,comment,peformed_already,num_train_epochs,max_tokens,temperature,stop
 4,classification_of_valuems set True,True,2,100,0.8,False,True,False,False
 5,lemmatization set True stemming set True classification_of_valuems set True,True,2,100,0.8,False,True,True,True
 6,lemmatization set True classification_of_valuems set True,True,2,100,0.8,False,True,False,True
-7,lemmatization set True stemming set True classification_of_valuems set True,
+7,lemmatization set True stemming set True classification_of_valuems set True,True,2,100,0.8,False,True,True,True
26_10_23_results_real_3.csv
CHANGED
@@ -5,4 +5,4 @@ run_number,items_per_minute,changed_settings,total_time_taken,rouge_scores_unnes
 4,1525.602524101584,classification_of_valuems set True,17.304638385772705,0,0.3449559795118336,0.3423341666457499,0.3223067218438548,0.3962323417858379,0.3934280156800367,0.3705820791169619,0.4480554100301472,0.4472379094634592,0.4191781582462838,0.1924640322240557,0.1864412220397102,0.1791588870738077,0.2322731554369229,0.2249807377601852,0.2130668974234288,0.275524472174082,0.267281230315844,0.2520826707151871,0.25,0.25,0.25
 5,1526.1514943613795,lemmatization set True stemming set True classification_of_valuems set True,17.29841375350952,0,0.2740090800292036,0.3255265414032295,0.2775650458916542,0.3242606066397121,0.382074138505741,0.3267769081728278,0.3772870132781871,0.4458722947774239,0.3791647882107381,0.1589738558026723,0.1948121530040918,0.163589425650448,0.197346401556911,0.2400173323974756,0.2012985648757239,0.2417203208425002,0.291225773302693,0.2448890209857868,0.5,0.5,0.5
 6,1530.684509829563,lemmatization set True classification_of_valuems set True,17.247185707092285,0,0.2089967577635823,0.2539863482828974,0.2180593819184433,0.2566736342083835,0.3067268601430931,0.2634860133658033,0.3083760408920908,0.3641939637052689,0.3143688188949664,0.1022732513904408,0.1231935696888519,0.1061505173202719,0.1366654941744046,0.1619876288426343,0.1387847257382091,0.1783473709510714,0.2101453928900783,0.1797982054917218,0.4999999999999998,0.4999999999999998,0.4999999999999998
-
+1,1537.4830854419854,lemmatization set True stemming set True classification_of_valuems set True,17.170920610427856,0,0.3137446610565907,0.346938031739325,0.3116890258549815,0.36716366175121,0.4078239534610949,0.3658593356405938,0.4216956378639542,0.4672338475399563,0.4174884317080075,0.1868318428239036,0.2090565062942769,0.1872856926409339,0.2333035163819085,0.2570762720696271,0.2307891585347144,0.2805117716175197,0.3026827885812185,0.2738993050878683,0.25,0.25,0.25
26_10_23_results_test.csv
ADDED
@@ -0,0 +1,2 @@
+run_number,items_per_minute,changed_settings,total_time_taken,rouge_scores_unnest,rouge1 low Precision,rouge1 low Recall,rouge1 low F1 Score,rouge1 mid Precision,rouge1 mid Recall,rouge1 mid F1 Score,rouge1 high Precision,rouge1 high Recall,rouge1 high F1 Score,rouge2 low Precision,rouge2 low Recall,rouge2 low F1 Score,rouge2 mid Precision,rouge2 mid Recall,rouge2 mid F1 Score,rouge2 high Precision,rouge2 high Recall,rouge2 high F1 Score,min_cosine_sim_value,max_cosine_sim_value,mean_cosine_sim_value
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
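
Both results files share the header added in `26_10_23_results_test.csv`: throughput (`items_per_minute`), the settings changed for the run, total time, ROUGE-1/ROUGE-2 low/mid/high precision/recall/F1, and min/max/mean cosine similarity. A minimal sketch for inspecting the appended rows with pandas; the file path assumes the repository root and is not part of the commit itself:

```python
# Sketch only: column names come from the header in 26_10_23_results_test.csv,
# and the same layout is assumed for 26_10_23_results_real_3.csv.
import pandas as pd

results = pd.read_csv("26_10_23_results_real_3.csv")
print(results[["run_number", "changed_settings",
               "rouge1 mid F1 Score", "mean_cosine_sim_value"]])
```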
README.md
CHANGED
@@ -856,6 +856,18 @@ The following `bitsandbytes` quantization config was used during training:
 - bnb_4bit_use_double_quant: True
 - bnb_4bit_compute_dtype: bfloat16
 
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+
 The following `bitsandbytes` quantization config was used during training:
 - quant_method: bitsandbytes
 - load_in_8bit: False
@@ -940,5 +952,6 @@ The following `bitsandbytes` quantization config was used during training:
 - PEFT 0.5.0
 - PEFT 0.5.0
 - PEFT 0.5.0
+- PEFT 0.5.0
 
 - PEFT 0.5.0
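
The README hunk above documents a 4-bit NF4 `bitsandbytes` setup with double quantization and bfloat16 compute. As a hedged reference only, this is roughly how that block maps onto `transformers.BitsAndBytesConfig`; the base model id is a placeholder, since the diff does not name the model:

```python
# Sketch only: mirrors the quantization block added to README.md.
# "base-model-id" is a placeholder, not taken from this commit.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_8bit=False,
    load_in_4bit=True,
    llm_int8_threshold=6.0,
    llm_int8_skip_modules=None,
    llm_int8_enable_fp32_cpu_offload=False,
    llm_int8_has_fp16_weight=False,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)

model = AutoModelForCausalLM.from_pretrained(
    "base-model-id",
    quantization_config=bnb_config,
    device_map="auto",
)
```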
adapter_model.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:cd2d4075639d125fd8fd4bf190ebf464b8dcdb35da291c6442e49e9f4be9f3a1
 size 100733709
adapter_model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:0607bdc015af4c4d62d248f29c61b9d188b89add5174c3cf472ffedb37026a85
 size 100690288
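
`adapter_model.bin` and `adapter_model.safetensors` are Git LFS pointers to the PEFT adapter weights for this checkpoint. A minimal sketch of loading such a checkpoint with PEFT 0.5.0 (the version listed in the README); the base model id and checkpoint path are placeholders:

```python
# Sketch only: ids and paths are placeholders, not taken from this commit.
from transformers import AutoModelForCausalLM
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("base-model-id")
# The checkpoint directory must contain adapter_config.json alongside
# adapter_model.bin / adapter_model.safetensors.
model = PeftModel.from_pretrained(base, "path/to/checkpoint-10")
```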
proc_dataset.csv
CHANGED
The diff for this file is too large to render.
training_args.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:6a6e6b27f3b5276f9444f89035fc4e30d713de28fb481153b8b90f3a2d90940b
 size 4283
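
`training_args.bin` is the LFS pointer for the serialized `TrainingArguments` object that the `transformers` Trainer writes alongside a checkpoint. A sketch for inspecting it locally, assuming the LFS object has been pulled:

```python
# Sketch only: assumes a local checkout with the LFS file materialized.
import torch

# weights_only=False because the file holds a pickled TrainingArguments
# object rather than plain tensors (needed on recent torch versions).
args = torch.load("training_args.bin", weights_only=False)
print(args.num_train_epochs)
```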