|
# Train |
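
End-to-end steps for training the model: environment setup, tokenizer training, dataset preparation, pretraining, checkpoint conversion, and evaluation.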
|
|
|
## Environment |
|
|
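Create a virtual environment inside `scripts/` and install the dependencies: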
|
```bash
cd scripts
python -m venv venv
source venv/bin/activate
pip install -U -r requirements.in
```
|
|
|
## Tokenizer |
|
|
|
```bash
python -B train_tokenizer.py
```
|
|
|
## Dataset |
|
|
|
```bash
python -B prepare_pretrain_dataset.py
```
|
|
|
## Model |
|
|
|
### Pretrain |
|
|
|
```bash
litgpt pretrain --config ./pretrain-model.yaml
```
|
|
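The final checkpoint is written to `out/pretrain/final/`. Convert it to a regular PyTorch state dict (`model.pth`) with `convert_from_litgpt`, and copy the model's `config.json` next to both the original and the converted weights: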
|
```bash
litgpt convert_from_litgpt out/pretrain/final/ out/converted_model
cp config.json out/pretrain/final/
cp config.json out/converted_model/
```
|
|
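Then re-save the converted weights in safetensors format: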
|
```python
import torch
from safetensors.torch import save_file

# Load the converted state dict and re-save it in safetensors format.
state_dict = torch.load('out/converted_model/model.pth', map_location='cpu')
save_file(state_dict, 'out/converted_model/model.safetensors')
```
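
As a quick sanity check, the converted directory should now load as a Hugging Face model. A minimal sketch, assuming `out/converted_model/` contains a compatible `config.json` alongside `model.safetensors`:

```python
# Sketch: load the converted checkpoint with transformers as a sanity check.
# Assumes out/converted_model/ holds a compatible config.json and model.safetensors.
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained('out/converted_model')
print(f'{sum(p.numel() for p in model.parameters()):,} parameters')
```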
|
|
|
## Evaluate |
|
|
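`litgpt evaluate` runs the selected lm-evaluation-harness task suites against the checkpoint and writes results to the given `--out_dir`: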
|
```bash
litgpt evaluate --tasks 'leaderboard' --out_dir 'evaluate-0/' --batch_size 4 --dtype 'bfloat16' out/pretrain/final/

litgpt evaluate --tasks 'hellaswag,gsm8k,truthfulqa_mc2,mmlu,winogrande,arc_challenge' --out_dir 'evaluate-1/' --batch_size 4 --dtype 'bfloat16' out/pretrain/final/

litgpt evaluate --tasks 'mmlu_pro,ifeval,mgsm_direct,mathqa,gpqa' --out_dir 'evaluate-2/' --batch_size 4 --dtype 'bfloat16' out/pretrain/final/
```
|
|