WKLI22 committed on
Commit
c00dc68
1 Parent(s): a49740c

Upload folder using huggingface_hub

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. checkpoint-10000/config.json +39 -0
  2. checkpoint-10000/generation_config.json +6 -0
  3. checkpoint-10000/merges.txt +0 -0
  4. checkpoint-10000/optimizer.pt +3 -0
  5. checkpoint-10000/pytorch_model.bin +3 -0
  6. checkpoint-10000/rng_state.pth +3 -0
  7. checkpoint-10000/scheduler.pt +3 -0
  8. checkpoint-10000/special_tokens_map.json +6 -0
  9. checkpoint-10000/tokenizer.json +0 -0
  10. checkpoint-10000/tokenizer_config.json +13 -0
  11. checkpoint-10000/trainer_state.json +0 -0
  12. checkpoint-10000/training_args.bin +3 -0
  13. checkpoint-10000/vocab.json +0 -0
  14. checkpoint-10100/config.json +39 -0
  15. checkpoint-10100/generation_config.json +6 -0
  16. checkpoint-10100/merges.txt +0 -0
  17. checkpoint-10100/optimizer.pt +3 -0
  18. checkpoint-10100/pytorch_model.bin +3 -0
  19. checkpoint-10100/rng_state.pth +3 -0
  20. checkpoint-10100/scheduler.pt +3 -0
  21. checkpoint-10100/special_tokens_map.json +6 -0
  22. checkpoint-10100/tokenizer.json +0 -0
  23. checkpoint-10100/tokenizer_config.json +13 -0
  24. checkpoint-10100/trainer_state.json +0 -0
  25. checkpoint-10100/training_args.bin +3 -0
  26. checkpoint-10100/vocab.json +0 -0
  27. checkpoint-10200/config.json +39 -0
  28. checkpoint-10200/generation_config.json +6 -0
  29. checkpoint-10200/merges.txt +0 -0
  30. checkpoint-10200/optimizer.pt +3 -0
  31. checkpoint-10200/pytorch_model.bin +3 -0
  32. checkpoint-10200/rng_state.pth +3 -0
  33. checkpoint-10200/scheduler.pt +3 -0
  34. checkpoint-10200/special_tokens_map.json +6 -0
  35. checkpoint-10200/tokenizer.json +0 -0
  36. checkpoint-10200/tokenizer_config.json +13 -0
  37. checkpoint-10200/trainer_state.json +0 -0
  38. checkpoint-10200/training_args.bin +3 -0
  39. checkpoint-10200/vocab.json +0 -0
  40. checkpoint-10300/config.json +39 -0
  41. checkpoint-10300/generation_config.json +6 -0
  42. checkpoint-10300/merges.txt +0 -0
  43. checkpoint-10300/optimizer.pt +3 -0
  44. checkpoint-10300/pytorch_model.bin +3 -0
  45. checkpoint-10300/rng_state.pth +3 -0
  46. checkpoint-10300/scheduler.pt +3 -0
  47. checkpoint-10300/special_tokens_map.json +6 -0
  48. checkpoint-10300/tokenizer.json +0 -0
  49. checkpoint-10300/tokenizer_config.json +13 -0
  50. checkpoint-10300/train.dat +206 -0
checkpoint-10000/config.json ADDED
@@ -0,0 +1,39 @@
+ {
+   "_name_or_path": "./codeparrot-ds/",
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPT2LMHeadModel"
+   ],
+   "attn_pdrop": 0.1,
+   "bos_token_id": 0,
+   "embd_pdrop": 0.1,
+   "eos_token_id": 0,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_ctx": 128,
+   "n_embd": 768,
+   "n_head": 12,
+   "n_inner": null,
+   "n_layer": 12,
+   "n_positions": 1024,
+   "reorder_and_upcast_attn": false,
+   "resid_pdrop": 0.1,
+   "scale_attn_by_inverse_layer_idx": false,
+   "scale_attn_weights": true,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "task_specific_params": {
+     "text-generation": {
+       "do_sample": true,
+       "max_length": 50
+     }
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.33.3",
+   "use_cache": true,
+   "vocab_size": 50000
+ }
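
For reference, this config.json describes a 12-layer, 12-head GPT-2 with n_embd=768 (roughly the 124M-parameter GPT-2 size) and a 50,000-token vocabulary. A minimal sketch of loading one of these checkpoint folders with transformers; the local path is an assumption (download the folder first, e.g. with huggingface_hub):

# Sketch: load a checkpoint folder from this commit. "checkpoint-10000" is an
# assumed local path, e.g. obtained via huggingface_hub.snapshot_download().
from transformers import GPT2LMHeadModel

model = GPT2LMHeadModel.from_pretrained("checkpoint-10000")  # reads config.json + pytorch_model.bin
print(model.num_parameters())  # ~124M given n_layer=12, n_embd=768, vocab_size=50000
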
checkpoint-10000/generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 0,
+   "eos_token_id": 0,
+   "transformers_version": "4.33.3"
+ }
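
generation_config.json only pins the bos/eos token ids; the sampling defaults recorded under task_specific_params in config.json (do_sample=true, max_length=50) correspond to what a text-generation pipeline would apply. A hedged sketch, with the prompt invented and the local path assumed:

# Sketch: sample from the checkpoint with the recorded text-generation defaults.
from transformers import pipeline

generator = pipeline("text-generation", model="checkpoint-10000")  # assumed local path
print(generator("def load_dataset(", do_sample=True, max_length=50)[0]["generated_text"])
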
checkpoint-10000/merges.txt ADDED
The diff for this file is too large to render.
checkpoint-10000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6479d2a768dcc096bcd6a136a9f64315d6b835e4760777b900f45492b21f5122
+ size 994063290
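
The .pt/.bin entries here are Git LFS pointer files, not the tensors themselves: the repo tracks only a SHA-256 oid and a byte size, with the ~994 MB optimizer state (consistent with AdamW keeping two moment buffers, roughly twice the 497 MB model file) living in LFS storage. A stdlib sketch for verifying a downloaded file against its pointer; the local path is an assumption:

# Sketch: verify a downloaded LFS object against the pointer's oid and size.
import hashlib, os

path = "checkpoint-10000/optimizer.pt"  # assumed local path
expected_oid = "6479d2a768dcc096bcd6a136a9f64315d6b835e4760777b900f45492b21f5122"
expected_size = 994063290

digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        digest.update(chunk)

assert os.path.getsize(path) == expected_size
assert digest.hexdigest() == expected_oid
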
checkpoint-10000/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2b84e41fd94f5f566cc125a7931cceec7c774a25f2b6c5ec0c6cc2442e0a52d4
+ size 497018202
checkpoint-10000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7625f0f1a72e7d469ee566dd3a4ceef850cfb55f0a54fb9fe4af8a847f74d25e
+ size 14244
checkpoint-10000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dc6c5cd66f35ac55512c851ca5dff0e5e575e0fd65c85e7aebf0f487e731d7e7
+ size 1064
checkpoint-10000/special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "bos_token": "<|endoftext|>",
+   "eos_token": "<|endoftext|>",
+   "pad_token": "<|endoftext|>",
+   "unk_token": "<|endoftext|>"
+ }
checkpoint-10000/tokenizer.json ADDED
The diff for this file is too large to render.
checkpoint-10000/tokenizer_config.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "add_prefix_space": false,
+   "bos_token": "<|endoftext|>",
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "<|endoftext|>",
+   "max_length": 128,
+   "model_max_length": 1024,
+   "stride": 0,
+   "tokenizer_class": "GPT2Tokenizer",
+   "truncation_side": "right",
+   "truncation_strategy": "longest_first",
+   "unk_token": "<|endoftext|>"
+ }
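
tokenizer_config.json pins a GPT2Tokenizer with model_max_length=1024 and <|endoftext|> doing quadruple duty as bos/eos/unk (and pad, per special_tokens_map.json). Together with vocab.json, merges.txt and tokenizer.json, that is everything AutoTokenizer needs; a quick sketch with the local path assumed:

# Sketch: load the tokenizer shipped inside each checkpoint folder.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("checkpoint-10000")  # assumed local path
print(tok.vocab_size)   # 50000, matching config.json
print(tok.eos_token)    # <|endoftext|>
print(tok("def f(x):").input_ids)
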
checkpoint-10000/trainer_state.json ADDED
The diff for this file is too large to render.
checkpoint-10000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cc996ddf5bf2aa938695ca3827f270f481a84e212a0153a85f2b519a3755070a
+ size 4472
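
training_args.bin is a torch-pickled TrainingArguments object, and the identical oid/size in every checkpoint shown confirms the run used one fixed set of arguments. A sketch of inspecting it; the path is assumed, unpickling needs transformers importable, and recent PyTorch may require weights_only=False:

# Sketch: inspect the pickled TrainingArguments saved by Trainer.
import torch

# On PyTorch >= 2.6 the weights_only default flipped; pass weights_only=False
# to load arbitrary pickles like this one.
args = torch.load("checkpoint-10000/training_args.bin")  # assumed local path
print(args.learning_rate, args.num_train_epochs)
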
checkpoint-10000/vocab.json ADDED
The diff for this file is too large to render.
checkpoint-10100/config.json ADDED
@@ -0,0 +1,39 @@
+ {
+   "_name_or_path": "./codeparrot-ds/",
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPT2LMHeadModel"
+   ],
+   "attn_pdrop": 0.1,
+   "bos_token_id": 0,
+   "embd_pdrop": 0.1,
+   "eos_token_id": 0,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_ctx": 128,
+   "n_embd": 768,
+   "n_head": 12,
+   "n_inner": null,
+   "n_layer": 12,
+   "n_positions": 1024,
+   "reorder_and_upcast_attn": false,
+   "resid_pdrop": 0.1,
+   "scale_attn_by_inverse_layer_idx": false,
+   "scale_attn_weights": true,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "task_specific_params": {
+     "text-generation": {
+       "do_sample": true,
+       "max_length": 50
+     }
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.33.3",
+   "use_cache": true,
+   "vocab_size": 50000
+ }
checkpoint-10100/generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 0,
+   "eos_token_id": 0,
+   "transformers_version": "4.33.3"
+ }
checkpoint-10100/merges.txt ADDED
The diff for this file is too large to render.
checkpoint-10100/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fa62109375456addce2ffbb3d22759bb9b8a37a9e26c0dd2e44146ec9312d3b3
+ size 994063290
checkpoint-10100/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8b121c385cc4f4216a0c396a4bef6b6b881b9d48bd980f9bad3084cbe0399577
+ size 497018202
checkpoint-10100/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bffe5b3975040df280cba625c904855047af23bcabe582a256110c0ca4011ccf
+ size 14244
checkpoint-10100/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7d64e8dea00abfe7e696dc543a5630d1ddec59e55ea651d0da2cb501e3226365
+ size 1064
checkpoint-10100/special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "bos_token": "<|endoftext|>",
+   "eos_token": "<|endoftext|>",
+   "pad_token": "<|endoftext|>",
+   "unk_token": "<|endoftext|>"
+ }
checkpoint-10100/tokenizer.json ADDED
The diff for this file is too large to render.
checkpoint-10100/tokenizer_config.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "add_prefix_space": false,
+   "bos_token": "<|endoftext|>",
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "<|endoftext|>",
+   "max_length": 128,
+   "model_max_length": 1024,
+   "stride": 0,
+   "tokenizer_class": "GPT2Tokenizer",
+   "truncation_side": "right",
+   "truncation_strategy": "longest_first",
+   "unk_token": "<|endoftext|>"
+ }
checkpoint-10100/trainer_state.json ADDED
The diff for this file is too large to render.
checkpoint-10100/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cc996ddf5bf2aa938695ca3827f270f481a84e212a0153a85f2b519a3755070a
+ size 4472
checkpoint-10100/vocab.json ADDED
The diff for this file is too large to render.
checkpoint-10200/config.json ADDED
@@ -0,0 +1,39 @@
+ {
+   "_name_or_path": "./codeparrot-ds/",
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPT2LMHeadModel"
+   ],
+   "attn_pdrop": 0.1,
+   "bos_token_id": 0,
+   "embd_pdrop": 0.1,
+   "eos_token_id": 0,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_ctx": 128,
+   "n_embd": 768,
+   "n_head": 12,
+   "n_inner": null,
+   "n_layer": 12,
+   "n_positions": 1024,
+   "reorder_and_upcast_attn": false,
+   "resid_pdrop": 0.1,
+   "scale_attn_by_inverse_layer_idx": false,
+   "scale_attn_weights": true,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "task_specific_params": {
+     "text-generation": {
+       "do_sample": true,
+       "max_length": 50
+     }
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.33.3",
+   "use_cache": true,
+   "vocab_size": 50000
+ }
checkpoint-10200/generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 0,
+   "eos_token_id": 0,
+   "transformers_version": "4.33.3"
+ }
checkpoint-10200/merges.txt ADDED
The diff for this file is too large to render.
checkpoint-10200/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9b37a0918902281a572deee590bc7f3b0789bd4834fa1e35d5800065a550fb4a
+ size 994063290
checkpoint-10200/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a454bf8fe688683f31d2bd2db9f5003f9b317c52b377a0cd0ecf1bbeb5abda53
+ size 497018202
checkpoint-10200/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:367e8eefcd860a5710c6593ffe363738ece6da93256c72f1ab7c4fdfc7f3c1fb
+ size 14244
checkpoint-10200/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:29651c459843f7ffb2c5b6bccfef8cd6019089d0f5672017a3d5415cbb2fb175
+ size 1064
checkpoint-10200/special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "bos_token": "<|endoftext|>",
+   "eos_token": "<|endoftext|>",
+   "pad_token": "<|endoftext|>",
+   "unk_token": "<|endoftext|>"
+ }
checkpoint-10200/tokenizer.json ADDED
The diff for this file is too large to render.
checkpoint-10200/tokenizer_config.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "add_prefix_space": false,
+   "bos_token": "<|endoftext|>",
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "<|endoftext|>",
+   "max_length": 128,
+   "model_max_length": 1024,
+   "stride": 0,
+   "tokenizer_class": "GPT2Tokenizer",
+   "truncation_side": "right",
+   "truncation_strategy": "longest_first",
+   "unk_token": "<|endoftext|>"
+ }
checkpoint-10200/trainer_state.json ADDED
The diff for this file is too large to render.
checkpoint-10200/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cc996ddf5bf2aa938695ca3827f270f481a84e212a0153a85f2b519a3755070a
+ size 4472
checkpoint-10200/vocab.json ADDED
The diff for this file is too large to render.
checkpoint-10300/config.json ADDED
@@ -0,0 +1,39 @@
+ {
+   "_name_or_path": "./codeparrot-ds/",
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPT2LMHeadModel"
+   ],
+   "attn_pdrop": 0.1,
+   "bos_token_id": 0,
+   "embd_pdrop": 0.1,
+   "eos_token_id": 0,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_ctx": 128,
+   "n_embd": 768,
+   "n_head": 12,
+   "n_inner": null,
+   "n_layer": 12,
+   "n_positions": 1024,
+   "reorder_and_upcast_attn": false,
+   "resid_pdrop": 0.1,
+   "scale_attn_by_inverse_layer_idx": false,
+   "scale_attn_weights": true,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "task_specific_params": {
+     "text-generation": {
+       "do_sample": true,
+       "max_length": 50
+     }
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.33.3",
+   "use_cache": true,
+   "vocab_size": 50000
+ }
checkpoint-10300/generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 0,
+   "eos_token_id": 0,
+   "transformers_version": "4.33.3"
+ }
checkpoint-10300/merges.txt ADDED
The diff for this file is too large to render.
checkpoint-10300/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:95e6aa93188b69ede317eb5d67f8b578ed660b7a884e5f3f313d78d2f1e7c9db
+ size 994063290
checkpoint-10300/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:efb1513920ebadc0cdda0d242b5878572c0059c8f98cf67fd75ff2bc72253361
+ size 497018202
checkpoint-10300/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d30ce49f407fd9749c928157beb4db7444fdba9205230971d6f28cbfe6efef0d
+ size 14244
checkpoint-10300/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b723d1614f7aeef2c6d6b38cb96b04a1f6f5ab23dd498799924067b79675391f
+ size 1064
checkpoint-10300/special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "bos_token": "<|endoftext|>",
+   "eos_token": "<|endoftext|>",
+   "pad_token": "<|endoftext|>",
+   "unk_token": "<|endoftext|>"
+ }
checkpoint-10300/tokenizer.json ADDED
The diff for this file is too large to render.
checkpoint-10300/tokenizer_config.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "add_prefix_space": false,
+   "bos_token": "<|endoftext|>",
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "<|endoftext|>",
+   "max_length": 128,
+   "model_max_length": 1024,
+   "stride": 0,
+   "tokenizer_class": "GPT2Tokenizer",
+   "truncation_side": "right",
+   "truncation_strategy": "longest_first",
+   "unk_token": "<|endoftext|>"
+ }
checkpoint-10300/train.dat ADDED
@@ -0,0 +1,206 @@
+ "loss": 4.2182,
+ "loss": 3.9201,
+ "loss": 3.8079,
+ "loss": 3.9802,
+ "loss": 3.9497,
+ "loss": 3.7474,
+ "loss": 3.7449,
+ "loss": 3.6893,
+ "loss": 3.518,
+ "loss": 3.5812,
+ "loss": 3.4697,
+ "loss": 3.4005,
+ "loss": 3.202,
+ "loss": 3.2481,
+ "loss": 3.4377,
+ "loss": 3.2781,
+ "loss": 3.1309,
+ "loss": 3.0013,
+ "loss": 2.9647,
+ "loss": 3.0833,
+ "loss": 2.9659,
+ "loss": 2.9136,
+ "loss": 2.9501,
+ "loss": 3.0921,
+ "loss": 2.8951,
+ "loss": 2.8615,
+ "loss": 2.886,
+ "loss": 2.6634,
+ "loss": 2.7992,
+ "loss": 2.7284,
+ "loss": 2.5383,
+ "loss": 2.9158,
+ "loss": 2.569,
+ "loss": 2.639,
+ "loss": 2.6051,
+ "loss": 2.5859,
+ "loss": 2.8165,
+ "loss": 2.7914,
+ "loss": 2.8124,
+ "loss": 2.536,
+ "loss": 2.4832,
+ "loss": 2.7004,
+ "loss": 2.5611,
+ "loss": 2.4008,
+ "loss": 2.6482,
+ "loss": 2.4384,
+ "loss": 2.6389,
+ "loss": 2.33,
+ "loss": 2.3862,
+ "loss": 2.4908,
+ "loss": 2.4269,
+ "loss": 2.4281,
+ "loss": 2.3384,
+ "loss": 2.6808,
+ "loss": 2.4791,
+ "loss": 2.4134,
+ "loss": 2.2267,
+ "loss": 2.4479,
+ "loss": 2.4216,
+ "loss": 2.4072,
+ "loss": 2.3671,
+ "loss": 2.2338,
+ "loss": 2.3592,
+ "loss": 2.5145,
+ "loss": 2.6603,
+ "loss": 2.3985,
+ "loss": 2.1567,
+ "loss": 2.4226,
+ "loss": 2.2642,
+ "loss": 2.2697,
+ "loss": 2.3797,
+ "loss": 2.2473,
+ "loss": 2.3887,
+ "loss": 2.2165,
+ "loss": 2.2813,
+ "loss": 2.2621,
+ "loss": 2.2801,
+ "loss": 2.2543,
+ "loss": 2.3779,
+ "loss": 2.3853,
+ "loss": 2.2737,
+ "loss": 2.3751,
+ "loss": 2.0766,
+ "loss": 2.2456,
+ "loss": 2.1394,
+ "loss": 2.0715,
+ "loss": 2.2069,
+ "loss": 2.1304,
+ "loss": 2.1161,
+ "loss": 2.1512,
+ "loss": 2.0955,
+ "loss": 2.2466,
+ "loss": 2.0475,
+ "loss": 2.1238,
+ "loss": 2.2063,
+ "loss": 1.9169,
+ "loss": 2.2551,
+ "loss": 2.2897,
+ "loss": 2.0693,
+ "loss": 2.2112,
+ "loss": 2.1032,
+ "loss": 1.8994,
+ "loss": 2.0233,
+ "loss": 2.1795,
+ "loss": 2.04,
+ "loss": 2.0926,
+ "loss": 1.9639,
+ "loss": 2.3427,
+ "loss": 1.9793,
+ "loss": 1.927,
+ "loss": 1.8329,
+ "loss": 1.9679,
+ "loss": 2.1962,
+ "loss": 2.0246,
+ "loss": 1.9195,
+ "loss": 2.0181,
+ "loss": 1.9007,
+ "loss": 2.029,
+ "loss": 1.9925,
+ "loss": 1.936,
+ "loss": 2.0462,
+ "loss": 1.9308,
+ "loss": 1.957,
+ "loss": 1.9303,
+ "loss": 2.0676,
+ "loss": 2.0288,
+ "loss": 1.833,
+ "loss": 2.0001,
+ "loss": 1.9148,
+ "loss": 1.9895,
+ "loss": 2.1705,
+ "loss": 1.8735,
+ "loss": 1.9762,
+ "loss": 1.8613,
+ "loss": 1.801,
+ "loss": 1.9148,
+ "loss": 2.1368,
+ "loss": 1.8184,
+ "loss": 1.5963,
+ "loss": 1.8879,
+ "loss": 1.9633,
+ "loss": 1.8162,
+ "loss": 1.6854,
+ "loss": 2.1203,
+ "loss": 1.7929,
+ "loss": 1.8433,
+ "loss": 2.0861,
+ "loss": 2.0828,
+ "loss": 1.6605,
+ "loss": 1.6951,
+ "loss": 1.9173,
+ "loss": 1.9349,
+ "loss": 1.8001,
+ "loss": 1.9535,
+ "loss": 1.7291,
+ "loss": 1.8336,
+ "loss": 2.0697,
+ "loss": 1.7562,
+ "loss": 1.7786,
+ "loss": 1.8246,
+ "loss": 1.7964,
+ "loss": 1.9483,
+ "loss": 1.7607,
+ "loss": 1.6808,
+ "loss": 1.7688,
+ "loss": 2.0228,
+ "loss": 1.9403,
+ "loss": 1.8574,
+ "loss": 1.8918,
+ "loss": 1.7663,
+ "loss": 1.7333,
+ "loss": 1.5394,
+ "loss": 1.899,
+ "loss": 1.9249,
+ "loss": 1.747,
+ "loss": 1.8479,
+ "loss": 1.8021,
+ "loss": 1.7032,
+ "loss": 1.8989,
+ "loss": 1.8185,
+ "loss": 1.6985,
+ "loss": 1.7797,
+ "loss": 1.6903,
+ "loss": 1.8766,
+ "loss": 1.7187,
+ "loss": 1.7295,
+ "loss": 1.6285,
+ "loss": 1.6925,
+ "loss": 1.719,
+ "loss": 1.781,
+ "loss": 1.7595,
+ "loss": 2.1111,
+ "loss": 1.9001,
+ "loss": 1.85,
+ "loss": 1.9016,
+ "loss": 1.9073,
+ "loss": 1.7666,
+ "loss": 1.8695,
+ "loss": 1.6799,
+ "loss": 1.9409,
+ "loss": 1.7277,
+ "loss": 1.7612,
+ "loss": 1.938,
+ "loss": 1.8985,
+ "loss": 1.7949,
+ "loss": 1.7116,