joelniklaus committed
Commit b64ebb0
1 Parent(s): 34e1cd0

Training in progress, step 650000

last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cb95505ff1ce1f1ae60ccc0ff56e50c65e71709500988743eae779417bef954a
+oid sha256:3bad596f8634edd936edd9e3205eea7787c085b96adaf2810bf8ba684fa5a092
 size 2693742553
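The blocks in this diff are Git LFS pointer files: only the `oid sha256:` digest and the byte `size` are versioned in the repository, while the binary itself lives in LFS storage. As a quick integrity check (not part of this commit), a downloaded file can be hashed and compared against the new pointer; the local path below is a placeholder.

```python
# Sketch: recompute the sha256 of a downloaded checkpoint file and compare it
# to the "oid sha256:..." value from the updated LFS pointer above.
import hashlib

# New oid for last-checkpoint/optimizer.pt from this commit.
EXPECTED_OID = "3bad596f8634edd936edd9e3205eea7787c085b96adaf2810bf8ba684fa5a092"

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file in 1 MiB chunks so large checkpoints fit in memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

print(sha256_of("last-checkpoint/optimizer.pt") == EXPECTED_OID)  # placeholder local path
```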
last-checkpoint/pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7e2579766c6a94f29afda6f54a1704f10bb7cf17a2913315942fb6937f47bdb8
+oid sha256:3592fbfb52d6223f19735cb1b16dcd5982acbab84a741828f89477f39862a75c
 size 1346893675
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2cd4be4e71f0e5e688c9d4c88aa4476996680e59e468a727a488142940a8b5d9
+oid sha256:682a554c00508d94189039fe81f3315d3c2e519f57096c412b356dbab1da5a65
 size 13611
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2cd4be4e71f0e5e688c9d4c88aa4476996680e59e468a727a488142940a8b5d9
+oid sha256:682a554c00508d94189039fe81f3315d3c2e519f57096c412b356dbab1da5a65
 size 13611
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2cd4be4e71f0e5e688c9d4c88aa4476996680e59e468a727a488142940a8b5d9
+oid sha256:682a554c00508d94189039fe81f3315d3c2e519f57096c412b356dbab1da5a65
 size 13611
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2cd4be4e71f0e5e688c9d4c88aa4476996680e59e468a727a488142940a8b5d9
+oid sha256:682a554c00508d94189039fe81f3315d3c2e519f57096c412b356dbab1da5a65
 size 13611
last-checkpoint/rng_state_4.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2cd4be4e71f0e5e688c9d4c88aa4476996680e59e468a727a488142940a8b5d9
+oid sha256:682a554c00508d94189039fe81f3315d3c2e519f57096c412b356dbab1da5a65
 size 13611
last-checkpoint/rng_state_5.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2cd4be4e71f0e5e688c9d4c88aa4476996680e59e468a727a488142940a8b5d9
+oid sha256:682a554c00508d94189039fe81f3315d3c2e519f57096c412b356dbab1da5a65
 size 13611
last-checkpoint/rng_state_6.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2cd4be4e71f0e5e688c9d4c88aa4476996680e59e468a727a488142940a8b5d9
+oid sha256:682a554c00508d94189039fe81f3315d3c2e519f57096c412b356dbab1da5a65
 size 13611
last-checkpoint/rng_state_7.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2cd4be4e71f0e5e688c9d4c88aa4476996680e59e468a727a488142940a8b5d9
+oid sha256:682a554c00508d94189039fe81f3315d3c2e519f57096c412b356dbab1da5a65
 size 13611
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f595742cd0d96240559aaf1ff72fa8686f62da9f07c5878ab2af30ab1e4f0a07
+oid sha256:9d7fa20411577666fac76fe76348b4f9231439cc2e524d6e3185910c258591e9
 size 623
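Taken together, pytorch_model.bin, optimizer.pt, scheduler.pt, and the per-process rng_state_*.pth files updated above are the per-checkpoint state that the Hugging Face Trainer writes, which is what lets a run be resumed mid-training (e.g. `trainer.train(resume_from_checkpoint="last-checkpoint")`). The weights themselves can also be inspected directly; a minimal sketch, assuming a local clone of this repository, with the path as a placeholder.

```python
# Sketch: load the checkpoint's state dict on CPU and report its size.
import torch

state_dict = torch.load("last-checkpoint/pytorch_model.bin", map_location="cpu")
num_params = sum(t.numel() for t in state_dict.values())
print(f"{len(state_dict)} tensors, {num_params / 1e6:.1f}M parameters")
```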
last-checkpoint/trainer_state.json CHANGED
@@ -1,8 +1,8 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 4.015831,
-  "global_step": 600000,
+  "epoch": 4.065831,
+  "global_step": 650000,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -3702,11 +3702,319 @@
       "eval_samples_per_second": 255.501,
       "eval_steps_per_second": 4.037,
       "step": 600000
+    },
+    {
+      "epoch": 4.02,
+      "learning_rate": 3.756550564175727e-05,
+      "loss": 0.5829,
+      "step": 601000
+    },
+    {
+      "epoch": 4.02,
+      "learning_rate": 3.74054216221926e-05,
+      "loss": 0.6222,
+      "step": 602000
+    },
+    {
+      "epoch": 4.02,
+      "learning_rate": 3.7245475334919246e-05,
+      "loss": 0.7225,
+      "step": 603000
+    },
+    {
+      "epoch": 4.02,
+      "learning_rate": 3.7085668529084184e-05,
+      "loss": 0.6782,
+      "step": 604000
+    },
+    {
+      "epoch": 4.02,
+      "learning_rate": 3.6926002952309016e-05,
+      "loss": 0.6024,
+      "step": 605000
+    },
+    {
+      "epoch": 4.02,
+      "learning_rate": 3.676648035067093e-05,
+      "loss": 0.5516,
+      "step": 606000
+    },
+    {
+      "epoch": 4.02,
+      "learning_rate": 3.6607102468683526e-05,
+      "loss": 0.625,
+      "step": 607000
+    },
+    {
+      "epoch": 4.02,
+      "learning_rate": 3.6447871049277796e-05,
+      "loss": 0.7176,
+      "step": 608000
+    },
+    {
+      "epoch": 4.02,
+      "learning_rate": 3.628878783378302e-05,
+      "loss": 0.7112,
+      "step": 609000
+    },
+    {
+      "epoch": 4.03,
+      "learning_rate": 3.612985456190778e-05,
+      "loss": 0.5777,
+      "step": 610000
+    },
+    {
+      "epoch": 4.03,
+      "learning_rate": 3.597107297172084e-05,
+      "loss": 0.5445,
+      "step": 611000
+    },
+    {
+      "epoch": 4.03,
+      "learning_rate": 3.581244479963225e-05,
+      "loss": 0.6362,
+      "step": 612000
+    },
+    {
+      "epoch": 4.03,
+      "learning_rate": 3.5653971780374295e-05,
+      "loss": 0.7177,
+      "step": 613000
+    },
+    {
+      "epoch": 4.03,
+      "learning_rate": 3.5495655646982505e-05,
+      "loss": 0.6937,
+      "step": 614000
+    },
+    {
+      "epoch": 4.03,
+      "learning_rate": 3.533749813077677e-05,
+      "loss": 0.5634,
+      "step": 615000
+    },
+    {
+      "epoch": 4.03,
+      "learning_rate": 3.517950096134232e-05,
+      "loss": 0.5808,
+      "step": 616000
+    },
+    {
+      "epoch": 4.03,
+      "learning_rate": 3.5021665866510925e-05,
+      "loss": 0.6136,
+      "step": 617000
+    },
+    {
+      "epoch": 4.03,
+      "learning_rate": 3.4863994572341843e-05,
+      "loss": 0.7181,
+      "step": 618000
+    },
+    {
+      "epoch": 4.03,
+      "learning_rate": 3.470648880310313e-05,
+      "loss": 0.6717,
+      "step": 619000
+    },
+    {
+      "epoch": 4.04,
+      "learning_rate": 3.4549150281252636e-05,
+      "loss": 0.6079,
+      "step": 620000
+    },
+    {
+      "epoch": 4.04,
+      "learning_rate": 3.439198072741921e-05,
+      "loss": 0.5487,
+      "step": 621000
+    },
+    {
+      "epoch": 4.04,
+      "learning_rate": 3.423498186038393e-05,
+      "loss": 0.6154,
+      "step": 622000
+    },
+    {
+      "epoch": 4.04,
+      "learning_rate": 3.407815539706124e-05,
+      "loss": 0.7083,
+      "step": 623000
+    },
+    {
+      "epoch": 4.04,
+      "learning_rate": 3.392150305248024e-05,
+      "loss": 0.7057,
+      "step": 624000
+    },
+    {
+      "epoch": 4.04,
+      "learning_rate": 3.3765026539765834e-05,
+      "loss": 0.5796,
+      "step": 625000
+    },
+    {
+      "epoch": 4.04,
+      "learning_rate": 3.360872757012011e-05,
+      "loss": 0.5453,
+      "step": 626000
+    },
+    {
+      "epoch": 4.04,
+      "learning_rate": 3.3452607852803584e-05,
+      "loss": 0.6212,
+      "step": 627000
+    },
+    {
+      "epoch": 4.04,
+      "learning_rate": 3.329666909511645e-05,
+      "loss": 0.7134,
+      "step": 628000
+    },
+    {
+      "epoch": 4.04,
+      "learning_rate": 3.3140913002379995e-05,
+      "loss": 0.688,
+      "step": 629000
+    },
+    {
+      "epoch": 4.05,
+      "learning_rate": 3.298534127791785e-05,
+      "loss": 0.5666,
+      "step": 630000
+    },
+    {
+      "epoch": 4.05,
+      "learning_rate": 3.282995562303754e-05,
+      "loss": 0.5787,
+      "step": 631000
+    },
+    {
+      "epoch": 4.05,
+      "learning_rate": 3.267475773701161e-05,
+      "loss": 0.6064,
+      "step": 632000
+    },
+    {
+      "epoch": 4.05,
+      "learning_rate": 3.251974931705933e-05,
+      "loss": 0.7118,
+      "step": 633000
+    },
+    {
+      "epoch": 4.05,
+      "learning_rate": 3.236493205832795e-05,
+      "loss": 0.6672,
+      "step": 634000
+    },
+    {
+      "epoch": 4.05,
+      "learning_rate": 3.221030765387417e-05,
+      "loss": 0.6105,
+      "step": 635000
+    },
+    {
+      "epoch": 4.05,
+      "learning_rate": 3.205587779464576e-05,
+      "loss": 0.5472,
+      "step": 636000
+    },
+    {
+      "epoch": 4.05,
+      "learning_rate": 3.190164416946285e-05,
+      "loss": 0.6068,
+      "step": 637000
+    },
+    {
+      "epoch": 4.05,
+      "learning_rate": 3.1747608464999725e-05,
+      "loss": 0.6989,
+      "step": 638000
+    },
+    {
+      "epoch": 4.05,
+      "learning_rate": 3.1593772365766105e-05,
+      "loss": 0.7023,
+      "step": 639000
+    },
+    {
+      "epoch": 4.06,
+      "learning_rate": 3.144013755408895e-05,
+      "loss": 0.5817,
+      "step": 640000
+    },
+    {
+      "epoch": 4.06,
+      "learning_rate": 3.128670571009399e-05,
+      "loss": 0.5433,
+      "step": 641000
+    },
+    {
+      "epoch": 4.06,
+      "learning_rate": 3.113347851168721e-05,
+      "loss": 0.6087,
+      "step": 642000
+    },
+    {
+      "epoch": 4.06,
+      "learning_rate": 3.098045763453678e-05,
+      "loss": 0.7113,
+      "step": 643000
+    },
+    {
+      "epoch": 4.06,
+      "learning_rate": 3.082764475205442e-05,
+      "loss": 0.6837,
+      "step": 644000
+    },
+    {
+      "epoch": 4.06,
+      "learning_rate": 3.0675041535377405e-05,
+      "loss": 0.5726,
+      "step": 645000
+    },
+    {
+      "epoch": 4.06,
+      "learning_rate": 3.052264965335e-05,
+      "loss": 0.5729,
+      "step": 646000
+    },
+    {
+      "epoch": 4.06,
+      "learning_rate": 3.0370470772505433e-05,
+      "loss": 0.5947,
+      "step": 647000
+    },
+    {
+      "epoch": 4.06,
+      "learning_rate": 3.0218506557047598e-05,
+      "loss": 0.7064,
+      "step": 648000
+    },
+    {
+      "epoch": 4.06,
+      "learning_rate": 3.006675866883275e-05,
+      "loss": 0.665,
+      "step": 649000
+    },
+    {
+      "epoch": 4.07,
+      "learning_rate": 2.991522876735154e-05,
+      "loss": 0.6141,
+      "step": 650000
+    },
+    {
+      "epoch": 4.07,
+      "eval_loss": 0.35492265224456787,
+      "eval_runtime": 18.2786,
+      "eval_samples_per_second": 273.544,
+      "eval_steps_per_second": 4.322,
+      "step": 650000
     }
   ],
   "max_steps": 1000000,
   "num_train_epochs": 9223372036854775807,
-  "total_flos": 3.579010302668597e+19,
+  "total_flos": 3.877259794247477e+19,
   "trial_name": null,
   "trial_params": null
 }
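The log_history entries appended above follow the usual Trainer schema (epoch, learning_rate, loss, and step for training logs; eval_* fields for evaluations), so the new window can be summarized straight from the JSON. A small sketch, assuming a local clone of this repository; the path is a placeholder.

```python
# Sketch: summarize the newly appended log window (steps 601k-650k).
import json

with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

train_logs = [e for e in state["log_history"] if "loss" in e and e["step"] > 600_000]
mean_loss = sum(e["loss"] for e in train_logs) / len(train_logs)
latest_eval = [e for e in state["log_history"] if "eval_loss" in e][-1]
print(f"global_step={state['global_step']}  mean train loss={mean_loss:.4f}  "
      f"eval_loss={latest_eval['eval_loss']:.4f}")
```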
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7e2579766c6a94f29afda6f54a1704f10bb7cf17a2913315942fb6937f47bdb8
+oid sha256:3592fbfb52d6223f19735cb1b16dcd5982acbab84a741828f89477f39862a75c
 size 1346893675
runs/Jan25_00-38-34_t1v-n-7a44a9fa-w-0/events.out.tfevents.1674607144.t1v-n-7a44a9fa-w-0.3701348.0 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:55c19380298c02754eadc66a356a383a7ce0a97751411e5a969b1ef94d442ebc
-size 78269
+oid sha256:190062049f95c6c38f5a7267bdfe9051ed0a5cf5fd38c410b1e6e756687e2879
+size 86545
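The updated events.out.tfevents.* file is the TensorBoard log for this run, so the same loss and learning-rate curves can be read back with TensorBoard's event accumulator. A sketch under the assumption that the run directory has been downloaded locally; the scalar tag names depend on how the Trainer logged them.

```python
# Sketch: list the scalar tags recorded in the TensorBoard event file.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

ea = EventAccumulator("runs/Jan25_00-38-34_t1v-n-7a44a9fa-w-0")  # local copy of the run directory
ea.Reload()  # parse the events.out.tfevents.* file(s) in the directory
print(ea.Tags()["scalars"])
```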