mtasic85 committed
Commit 11609de
1 Parent(s): 9e5bc22

contrain model

README.md CHANGED
@@ -263,3 +263,11 @@ litgpt evaluate --tasks 'mmlu_multilingual,mgsm' --out_dir 'evaluate-multilingua
  ```bash
  litgpt evaluate --tasks 'wikitext,qasper' --out_dir 'evaluate-long/' --batch_size 4 --dtype 'bfloat16' out/pretrain/final/
  ```
+
+ | Tasks           | Version | Filter | n-shot | Metric          |   |      Value |   | Stderr |
+ |-----------------|--------:|--------|-------:|-----------------|---|-----------:|---|-------:|
+ | qasper_bool     |       1 | none   |      0 | f1              | ↑ |     0.0000 | ± |      0 |
+ | qasper_freeform |       2 | none   |      0 | f1_abstractive  | ↑ |     0.0036 | ± |  0.001 |
+ | wikitext        |       2 | none   |      0 | bits_per_byte   | ↓ |     3.0634 | ± |    N/A |
+ |                 |         | none   |      0 | byte_perplexity | ↓ |     8.3596 | ± |    N/A |
+ |                 |         | none   |      0 | word_perplexity | ↓ | 85375.3002 | ± |    N/A |
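
For the added wikitext rows, the three metrics are different views of the same measurement: assuming the usual lm-eval-harness definitions, byte_perplexity is 2 raised to bits_per_byte, and word_perplexity is byte_perplexity raised to the average number of bytes per word in the evaluation text. A small sanity-check sketch using only the values from the table above:

```python
import math

# Wikitext values from the evaluation table above.
bits_per_byte = 3.0634
byte_perplexity = 8.3596
word_perplexity = 85375.3002

# Assumed lm-eval-harness relation: byte_perplexity == 2 ** bits_per_byte.
print(2 ** bits_per_byte)                                      # ~8.36, matches the table

# word_perplexity == byte_perplexity ** (avg bytes per word), so the implied
# bytes-per-word of the eval text can be backed out from the two reported values.
print(math.log(word_perplexity) / math.log(byte_perplexity))   # ~5.3
```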
scripts/TRAIN.md CHANGED
@@ -54,6 +54,13 @@ state_dict = torch.load('out/converted_model/model.pth', map_location='cpu')
  save_file(state_dict, 'out/converted_model/model.safetensors')
  ```
 
+ ### Continued Pretraining
+
+ ```bash
+ litgpt convert_pretrained_checkpoint out/pretrain/final/ out/pretrain_checkpoint/final/
+ litgpt pretrain --config ./contrain-model.yaml
+ ```
+
  ## Evaluate
 
  ```bash
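
Before launching the continued-pretraining run, it can be worth peeking at the converted checkpoint, in the same spirit as the torch.load/save_file snippet earlier in TRAIN.md. A minimal sketch; the `lit_model.pth` filename inside `out/pretrain_checkpoint/final/` is an assumption about what `litgpt convert_pretrained_checkpoint` writes, so adjust to whatever the directory actually contains:

```python
import torch

# Inspect the converted checkpoint before continued pretraining.
# NOTE: 'lit_model.pth' is an assumed filename; check the output dir contents.
state_dict = torch.load('out/pretrain_checkpoint/final/lit_model.pth', map_location='cpu')

num_params = sum(t.numel() for t in state_dict.values())
print(f'{len(state_dict)} tensors, {num_params / 1e6:.1f}M parameters')
for name in list(state_dict)[:5]:
    print(name, tuple(state_dict[name].shape))
```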
scripts/contrain-model.yaml ADDED
@@ -0,0 +1,147 @@
+ # https://huggingface.co/meta-llama/Llama-3.2-1B-Instruct/blob/main/config.json
+
+ # The name of the model to pretrain. Choose from names in ``litgpt.config``. Mutually exclusive with
+ # ``model_config``. (type: Optional[str], default: null)
+ model_name: "Llama-3.2-1B"
+
+ # A ``litgpt.Config`` object to define the model architecture. Mutually exclusive with
+ # ``model_name``. (type: Optional[Config], default: null)
+ model_config:
+   padded_vocab_size: 32768
+   vocab_size: 32768
+   block_size: 8192
+   n_layer: 8
+   n_head: 32
+   head_size: 64
+   n_embd: 512
+   n_query_groups: 8
+   rotary_percentage: 1.0
+   parallel_residual: true
+   shared_attention_norm: false
+   bias: false
+   norm_class_name: "RMSNorm"
+   norm_eps: 1e-05
+   mlp_class_name: "LLaMAMLP"
+   intermediate_size: 2048
+   rope_base: 500000
+   rope_adjustments:
+     factor: 32.0
+     low_freq_factor: 1.0
+     high_freq_factor: 4.0
+     original_max_seq_len: 8192
+
+ # Directory in which to save checkpoints and logs. If running in a Lightning Studio Job, look for it in
+ # /teamspace/jobs/<job-name>/share. (type: <class 'Path'>, default: out/pretrain)
+ out_dir: "../out/contrain/"
+
+ # The precision to use for pretraining. Possible choices: "bf16-true", "bf16-mixed", "32-true". (type: Optional[str], default: null)
+ # precision: bf16-mixed
+ precision: bf16-true
+
+ # Optional path to a checkpoint directory to initialize the model from.
+ # Useful for continued pretraining. Mutually exclusive with ``resume``. (type: Optional[Path], default: null)
+ initial_checkpoint_dir: out/pretrain_checkpoint/final/
+
+ # Path to a checkpoint directory to resume from in case training was interrupted, or ``True`` to resume
+ # from the latest checkpoint in ``out_dir``. An error will be raised if no checkpoint is found. Passing
+ # ``'auto'`` will resume from the latest checkpoint but not error if no checkpoint exists.
+ # (type: Union[bool, Literal["auto"], Path], default: False)
+ resume: false
+ # resume: "auto"
+
+ # Data-related arguments. If not provided, the default is ``litgpt.data.TinyLlama``.
+ data:
+   class_path: LitData
+
+   init_args:
+     data_path: "../contrain-data/"
+     num_workers: 32
+
+ # Training-related arguments. See ``litgpt.args.TrainArgs`` for details
+ train:
+   # Number of optimizer steps between saving checkpoints (type: Optional[int], default: 1000)
+   save_interval: 200
+
+   # Number of iterations between logging calls (type: int, default: 1)
+   log_interval: 1
+
+   # Number of samples between optimizer steps across data-parallel ranks (type: int, default: 512)
+   global_batch_size: 512
+
+   # Number of samples per data-parallel rank (type: int, default: 4)
+   micro_batch_size: 4
+
+   # Number of iterations with learning rate warmup active (type: int, default: 2000)
+   lr_warmup_steps: 1000
+
+   # Number of epochs to train on (type: Optional[int], default: null)
+   epochs:
+
+   # Total number of tokens to train on (type: Optional[int], default: 3000000000000)
+   # max_tokens: 19626937782 # 1197787 * 8193 * 2
+   max_tokens: 9813468891 # 1197787 * 8193 * 1
+
+   # Limits the number of optimizer steps to run. (type: Optional[int], default: null)
+   max_steps:
+
+   # Limits the length of samples. Off by default (type: Optional[int], default: null)
+   max_seq_length: 8193
+
+   # Whether to tie the embedding weights with the language modeling head weights. (type: Optional[bool], default: False)
+   tie_embeddings: true
+
+   # (type: Optional[float], default: 1.0)
+   max_norm: 1.0
+
+   # (type: float, default: 4e-05)
+   min_lr: 1e-05
+
+ # Evaluation-related arguments. See ``litgpt.args.EvalArgs`` for details
+ eval:
+   # Number of optimizer steps between evaluation calls (type: int, default: 1000)
+   interval: 100
+
+   # Number of tokens to generate (type: Optional[int], default: null)
+   max_new_tokens:
+
+   # Number of iterations (type: int, default: 100)
+   max_iters: 100
+
+   # Whether to evaluate on the validation set at the beginning of the training
+   initial_validation: false
+
+   # Whether to evaluate on the validation set at the end of the training
+   final_validation: true
+
+ # Optimizer-related arguments
+ optimizer:
+   # class_path: torch.optim.AdamW
+   class_path: grokadamw.GrokAdamW
+
+   init_args:
+     # (type: float, default: 0.001)
+     lr: 4e-05
+
+     # (type: float, default: 0.01)
+     weight_decay: 1e-2
+
+     # (type: tuple, default: (0.9,0.999))
+     betas:
+       - 0.9
+       - 0.999
+
+ # How many devices/GPUs to use. Uses all GPUs by default. (type: Union[int, str], default: auto)
+ devices: auto
+
+ # How many nodes to use. (type: int, default: 1)
+ num_nodes: 1
+
+ # Optional path to the tokenizer dir that was used for preprocessing the dataset. Only some data
+ # modules require this. (type: Optional[Path], default: null)
+ tokenizer_dir: "../"
+
+ # The name of the logger to send metrics to. (type: Literal['wandb', 'tensorboard', 'csv'], default: tensorboard)
+ logger_name: "wandb"
+
+ # The random seed to use for reproducibility. (type: int, default: 42)
+ seed: 23
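
A quick back-of-the-envelope check of the training budget encoded above: the yaml comment derives max_tokens from 1,197,787 packed sequences of 8,193 tokens (one pass over the data; the commented-out value doubles it for two passes), and combining that with global_batch_size gives roughly 2,300 optimizer steps, of which lr_warmup_steps: 1000 covers a sizeable fraction. A minimal sketch using only values from the config (the step count is an estimate, not something the config reports):

```python
# Back-of-the-envelope check of the budget in contrain-model.yaml (values copied from the config).
num_sequences = 1_197_787        # packed sequences, from the max_tokens comment
max_seq_length = 8_193           # tokens per sequence
global_batch_size = 512          # samples per optimizer step across data-parallel ranks

max_tokens = num_sequences * max_seq_length
print(max_tokens)                # 9813468891 -- matches max_tokens in the config

steps_per_epoch = max_tokens / (global_batch_size * max_seq_length)
print(round(steps_per_epoch))    # ~2339 optimizer steps for a single pass
```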
scripts/pretrain-model.yaml CHANGED
@@ -72,7 +72,6 @@ train:
   micro_batch_size: 16
 
   # Number of iterations with learning rate warmup active (type: int, default: 2000)
-  # lr_warmup_steps: 2000
   lr_warmup_steps: 10
 
   # Number of epochs to train on (type: Optional[int], default: null)
@@ -118,8 +117,6 @@ eval:
 optimizer:
   # class_path: torch.optim.AdamW
   class_path: grokadamw.GrokAdamW
-  # class_path: bitsandbytes.optim.AdamW8bit
-  # class_path: bitsandbytes.optim.PagedAdamW8bit
 
   init_args:
     # (type: float, default: 0.001)
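
Both config files select the optimizer through a dotted class_path plus init_args, which is why torch.optim.AdamW, grokadamw.GrokAdamW, or the now-removed bitsandbytes variants can be swapped without touching anything else. A minimal sketch of that dotted-path convention, shown with torch.optim.AdamW since it needs no extra packages; how litgpt itself resolves the path may differ in detail:

```python
import importlib

import torch


def resolve_class(dotted_path: str):
    """Turn a config-style dotted path like 'torch.optim.AdamW' into a class."""
    module_name, _, class_name = dotted_path.rpartition('.')
    return getattr(importlib.import_module(module_name), class_name)


# init_args mirror the values in the yaml files above.
optimizer_cls = resolve_class('torch.optim.AdamW')   # or 'grokadamw.GrokAdamW' if installed
model = torch.nn.Linear(512, 512)                    # stand-in module for illustration
optimizer = optimizer_cls(model.parameters(), lr=4e-5, weight_decay=1e-2, betas=(0.9, 0.999))
print(type(optimizer).__name__)
```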