joelniklaus committed
Commit 18dc43e
1 Parent(s): c4db689

Training in progress, step 200000

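This commit refreshes the step-200000 checkpoint under last-checkpoint/ (optimizer, model weights, per-device RNG states, scheduler, trainer state) as well as the exported pytorch_model.bin and the TensorBoard run file. As a minimal, non-authoritative sketch (not taken from this repo's training scripts), the checkpoint files changed below are ordinary JSON / torch.save payloads once the Git LFS objects are pulled:

```python
# Sketch only: inspect the committed checkpoint locally after `git lfs pull`.
import json
import torch

# trainer_state.json records where training stands.
with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)
print(state["global_step"], state["epoch"])  # 200000 33.004885

# optimizer.pt, scheduler.pt and rng_state_*.pth are plain torch pickles.
rng_state = torch.load("last-checkpoint/rng_state_0.pth", map_location="cpu")
```

Resuming would typically go through the Hugging Face Trainer, e.g. trainer.train(resume_from_checkpoint="last-checkpoint"), which restores optimizer.pt, scheduler.pt, rng_state_*.pth and trainer_state.json.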
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c88079b89bf17ef7fd7b8f301f11fd56c5454556700ef215afcc3a6d82b6fd4e
+ oid sha256:58f10ed6d7fa49eb59e909d6c06dc5c9d4e9a21d54749718ae423ca79f19ec2a
  size 885325017
last-checkpoint/pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:cb82296c9210d3fc85543370f1095649fa4d65e39474e35d9b2e22f13bf9af90
+ oid sha256:6ed866c28d1fd9af9d2fc9acd25ad59caa27a7042168cf8dae993d46999adb4c
  size 442675755
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1e82a4578704540d716af86473de2c70d43d424fff2812358162e73d576bf0ee
+ oid sha256:0864d6d8b85a277ad9762a99dcf32c711d65753ea08f2fba98f44cd73ae361a2
  size 13611
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1e82a4578704540d716af86473de2c70d43d424fff2812358162e73d576bf0ee
+ oid sha256:0864d6d8b85a277ad9762a99dcf32c711d65753ea08f2fba98f44cd73ae361a2
  size 13611
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1e82a4578704540d716af86473de2c70d43d424fff2812358162e73d576bf0ee
+ oid sha256:0864d6d8b85a277ad9762a99dcf32c711d65753ea08f2fba98f44cd73ae361a2
  size 13611
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1e82a4578704540d716af86473de2c70d43d424fff2812358162e73d576bf0ee
+ oid sha256:0864d6d8b85a277ad9762a99dcf32c711d65753ea08f2fba98f44cd73ae361a2
  size 13611
last-checkpoint/rng_state_4.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1e82a4578704540d716af86473de2c70d43d424fff2812358162e73d576bf0ee
+ oid sha256:0864d6d8b85a277ad9762a99dcf32c711d65753ea08f2fba98f44cd73ae361a2
  size 13611
last-checkpoint/rng_state_5.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1e82a4578704540d716af86473de2c70d43d424fff2812358162e73d576bf0ee
+ oid sha256:0864d6d8b85a277ad9762a99dcf32c711d65753ea08f2fba98f44cd73ae361a2
  size 13611
last-checkpoint/rng_state_6.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1e82a4578704540d716af86473de2c70d43d424fff2812358162e73d576bf0ee
+ oid sha256:0864d6d8b85a277ad9762a99dcf32c711d65753ea08f2fba98f44cd73ae361a2
  size 13611
last-checkpoint/rng_state_7.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1e82a4578704540d716af86473de2c70d43d424fff2812358162e73d576bf0ee
+ oid sha256:0864d6d8b85a277ad9762a99dcf32c711d65753ea08f2fba98f44cd73ae361a2
  size 13611
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:466c6a5b8905e18425c9b34e30a286f9f5cd812d089137c68b3bf75167c114fb
+ oid sha256:34c7791bbb10178054552ff1c1aa9bf08a101a8199906b7bf72dd42f5c977109
  size 623
last-checkpoint/trainer_state.json CHANGED
@@ -1,8 +1,8 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 24.02628,
- "global_step": 150000,
+ "epoch": 33.004885,
+ "global_step": 200000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -930,11 +930,319 @@
  "eval_samples_per_second": 452.806,
  "eval_steps_per_second": 3.622,
  "step": 150000
+ },
+ {
+ "epoch": 25.0,
+ "learning_rate": 1.553232954407171e-05,
+ "loss": 0.7802,
+ "step": 151000
+ },
+ {
+ "epoch": 25.01,
+ "learning_rate": 1.4938160786375572e-05,
+ "loss": 0.6587,
+ "step": 152000
+ },
+ {
+ "epoch": 25.01,
+ "learning_rate": 1.435357758543015e-05,
+ "loss": 0.7201,
+ "step": 153000
+ },
+ {
+ "epoch": 25.02,
+ "learning_rate": 1.3778739760445552e-05,
+ "loss": 0.8071,
+ "step": 154000
+ },
+ {
+ "epoch": 25.02,
+ "learning_rate": 1.3213804466343421e-05,
+ "loss": 0.8215,
+ "step": 155000
+ },
+ {
+ "epoch": 25.03,
+ "learning_rate": 1.2658926150792322e-05,
+ "loss": 0.784,
+ "step": 156000
+ },
+ {
+ "epoch": 26.0,
+ "learning_rate": 1.2114256511983274e-05,
+ "loss": 0.7825,
+ "step": 157000
+ },
+ {
+ "epoch": 26.01,
+ "learning_rate": 1.157994445715706e-05,
+ "loss": 0.6544,
+ "step": 158000
+ },
+ {
+ "epoch": 26.01,
+ "learning_rate": 1.1056136061894384e-05,
+ "loss": 0.7131,
+ "step": 159000
+ },
+ {
+ "epoch": 26.02,
+ "learning_rate": 1.0542974530180327e-05,
+ "loss": 0.801,
+ "step": 160000
+ },
+ {
+ "epoch": 26.02,
+ "learning_rate": 1.0040600155253765e-05,
+ "loss": 0.8218,
+ "step": 161000
+ },
+ {
+ "epoch": 26.03,
+ "learning_rate": 9.549150281252633e-06,
+ "loss": 0.7793,
+ "step": 162000
+ },
+ {
+ "epoch": 27.0,
+ "learning_rate": 9.068759265665384e-06,
+ "loss": 0.7901,
+ "step": 163000
+ },
+ {
+ "epoch": 27.01,
+ "learning_rate": 8.599558442598998e-06,
+ "loss": 0.6464,
+ "step": 164000
+ },
+ {
+ "epoch": 27.01,
+ "learning_rate": 8.141676086873572e-06,
+ "loss": 0.7081,
+ "step": 165000
+ },
+ {
+ "epoch": 27.02,
+ "learning_rate": 7.695237378953223e-06,
+ "loss": 0.7974,
+ "step": 166000
+ },
+ {
+ "epoch": 27.02,
+ "learning_rate": 7.260364370723044e-06,
+ "loss": 0.8208,
+ "step": 167000
+ },
+ {
+ "epoch": 27.03,
+ "learning_rate": 6.837175952121306e-06,
+ "loss": 0.7759,
+ "step": 168000
+ },
+ {
+ "epoch": 28.0,
+ "learning_rate": 6.425787818636131e-06,
+ "loss": 0.7954,
+ "step": 169000
+ },
+ {
+ "epoch": 28.01,
+ "learning_rate": 6.026312439675552e-06,
+ "loss": 0.6422,
+ "step": 170000
+ },
+ {
+ "epoch": 28.01,
+ "learning_rate": 5.6388590278194096e-06,
+ "loss": 0.7056,
+ "step": 171000
+ },
+ {
+ "epoch": 28.02,
+ "learning_rate": 5.263533508961827e-06,
+ "loss": 0.7913,
+ "step": 172000
+ },
+ {
+ "epoch": 28.02,
+ "learning_rate": 4.900438493352055e-06,
+ "loss": 0.8196,
+ "step": 173000
+ },
+ {
+ "epoch": 28.03,
+ "learning_rate": 4.549673247541875e-06,
+ "loss": 0.7735,
+ "step": 174000
+ },
+ {
+ "epoch": 29.0,
+ "learning_rate": 4.2113336672471245e-06,
+ "loss": 0.8013,
+ "step": 175000
+ },
+ {
+ "epoch": 29.01,
+ "learning_rate": 3.885512251130763e-06,
+ "loss": 0.6383,
+ "step": 176000
+ },
+ {
+ "epoch": 29.01,
+ "learning_rate": 3.5722980755146517e-06,
+ "loss": 0.7044,
+ "step": 177000
+ },
+ {
+ "epoch": 29.02,
+ "learning_rate": 3.271776770026963e-06,
+ "loss": 0.7846,
+ "step": 178000
+ },
+ {
+ "epoch": 29.02,
+ "learning_rate": 2.9840304941919415e-06,
+ "loss": 0.8199,
+ "step": 179000
+ },
+ {
+ "epoch": 29.03,
+ "learning_rate": 2.7091379149682685e-06,
+ "loss": 0.7701,
+ "step": 180000
+ },
+ {
+ "epoch": 30.0,
+ "learning_rate": 2.4471741852423237e-06,
+ "loss": 0.8161,
+ "step": 181000
+ },
+ {
+ "epoch": 30.01,
+ "learning_rate": 2.1982109232821178e-06,
+ "loss": 0.6251,
+ "step": 182000
+ },
+ {
+ "epoch": 30.01,
+ "learning_rate": 1.962316193157593e-06,
+ "loss": 0.703,
+ "step": 183000
+ },
+ {
+ "epoch": 30.02,
+ "learning_rate": 1.7395544861325718e-06,
+ "loss": 0.7792,
+ "step": 184000
+ },
+ {
+ "epoch": 30.02,
+ "learning_rate": 1.5299867030334814e-06,
+ "loss": 0.8229,
+ "step": 185000
+ },
+ {
+ "epoch": 30.03,
+ "learning_rate": 1.333670137599713e-06,
+ "loss": 0.767,
+ "step": 186000
+ },
+ {
+ "epoch": 31.0,
+ "learning_rate": 1.1506584608200367e-06,
+ "loss": 0.8206,
+ "step": 187000
+ },
+ {
+ "epoch": 31.01,
+ "learning_rate": 9.810017062595322e-07,
+ "loss": 0.6242,
+ "step": 188000
+ },
+ {
+ "epoch": 31.01,
+ "learning_rate": 8.247462563808817e-07,
+ "loss": 0.7006,
+ "step": 189000
+ },
+ {
+ "epoch": 31.02,
+ "learning_rate": 6.819348298638839e-07,
+ "loss": 0.7765,
+ "step": 190000
+ },
+ {
+ "epoch": 31.02,
+ "learning_rate": 5.526064699265753e-07,
+ "loss": 0.823,
+ "step": 191000
+ },
+ {
+ "epoch": 31.03,
+ "learning_rate": 4.367965336512403e-07,
+ "loss": 0.7648,
+ "step": 192000
+ },
+ {
+ "epoch": 32.0,
+ "learning_rate": 3.3453668231809286e-07,
+ "loss": 0.8259,
+ "step": 193000
+ },
+ {
+ "epoch": 32.01,
+ "learning_rate": 2.458548727494292e-07,
+ "loss": 0.6228,
+ "step": 194000
+ },
+ {
+ "epoch": 32.01,
+ "learning_rate": 1.7077534966650766e-07,
+ "loss": 0.6995,
+ "step": 195000
+ },
+ {
+ "epoch": 32.02,
+ "learning_rate": 1.0931863906127327e-07,
+ "loss": 0.7717,
+ "step": 196000
+ },
+ {
+ "epoch": 32.02,
+ "learning_rate": 6.150154258476315e-08,
+ "loss": 0.8257,
+ "step": 197000
+ },
+ {
+ "epoch": 32.03,
+ "learning_rate": 2.7337132953697554e-08,
+ "loss": 0.7632,
+ "step": 198000
+ },
+ {
+ "epoch": 32.03,
+ "learning_rate": 6.834750376549792e-09,
+ "loss": 0.8274,
+ "step": 199000
+ },
+ {
+ "epoch": 33.0,
+ "learning_rate": 0.0,
+ "loss": 0.625,
+ "step": 200000
+ },
+ {
+ "epoch": 33.0,
+ "eval_loss": 0.22049324214458466,
+ "eval_runtime": 11.0282,
+ "eval_samples_per_second": 453.385,
+ "eval_steps_per_second": 3.627,
+ "step": 200000
  }
  ],
  "max_steps": 200000,
  "num_train_epochs": 9223372036854775807,
- "total_flos": 2.527113008048505e+18,
+ "total_flos": 3.369496644780294e+18,
  "trial_name": null,
  "trial_params": null
  }
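The learning rates appended to the log above fall smoothly to 0.0 at the final step 200,000. As a rough consistency check (the peak learning rate and warmup length below are inferred from the logged values, not read from the actual training arguments), the numbers match a linear-warmup plus cosine-decay schedule peaking around 1e-4 with roughly 10,000 warmup steps:

```python
import math

def cosine_lr(step, peak_lr=1e-4, warmup=10_000, total=200_000):
    """Assumed linear-warmup + cosine-decay schedule; parameters are inferred, not confirmed."""
    if step < warmup:
        return peak_lr * step / warmup
    progress = (step - warmup) / (total - warmup)
    return peak_lr * 0.5 * (1.0 + math.cos(math.pi * progress))

print(cosine_lr(162_000))  # ~9.549e-06, matching the value logged at step 162000
print(cosine_lr(200_000))  # 0.0, matching the final logged value
```

For example, the value logged at step 162,000 (9.549150281252633e-06) equals 1e-4 * 0.5 * (1 + cos(0.8π)) under these assumptions.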
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:cb82296c9210d3fc85543370f1095649fa4d65e39474e35d9b2e22f13bf9af90
+ oid sha256:6ed866c28d1fd9af9d2fc9acd25ad59caa27a7042168cf8dae993d46999adb4c
  size 442675755
runs/Feb06_00-19-31_t1v-n-e8ea8395-w-0/events.out.tfevents.1675642795.t1v-n-e8ea8395-w-0.13064.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:2c3230328b8a7532d69b1559a890fc8759f7bf08ab718c10d5ce96ce2bbde4a5
- size 28555
+ oid sha256:324e8dadd6acd9f764083b1324b804cdccc676061ad00109dd952a5110fc86b9
+ size 36831
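The run file above is a TensorBoard event log; its size grew from 28555 to 36831 bytes because the scalars logged between steps 150,000 and 200,000 were appended. A rough sketch of reading it back (tag names are assumptions, not confirmed from this run):

```python
# Sketch only: load the appended TensorBoard scalars from the run directory.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

acc = EventAccumulator("runs/Feb06_00-19-31_t1v-n-e8ea8395-w-0")
acc.Reload()
print(acc.Tags()["scalars"])  # available tags depend on the Trainer config, e.g. "train/loss"
```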