marianna13 commited on
Commit
3156b69
1 Parent(s): ab2596c

Upload configs.yaml with huggingface_hub

Browse files
Files changed (1) hide show
  1. configs.yaml +36 -0
configs.yaml ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
---
# LLaMA-Factory SFT training configuration (configs.yaml).
# Reconstructed from a diff view: removed the leading "+" diff markers and the
# interleaved line numbers that made the stored content invalid YAML.
# All key/value pairs are unchanged and kept in the original alphabetical order.
bf16: true
cutoff_len: 512
dataset: llamafactory/alpaca_en
dataset_dir: ONLINE
ddp_timeout: 180000000
deepspeed: dcft/train/zero1.json
do_train: true
enable_liger_kernel: true
eval_strategy: epoch
finetuning_type: full
formatting: alpaca
gradient_accumulation_steps: 4
gradient_checkpointing: true
hub_model_id: marianna13/meta-llama-3.1-8b-alpaca-sft-sample
learning_rate: 2.0e-05
logging_steps: 10
lr_scheduler_type: cosine
model_name_or_path: meta-llama/Meta-Llama-3.1-8B
neat_packing: true
num_train_epochs: 1.0
# NOTE(review): directory name says "mistral" but model_name_or_path and
# run_name reference Llama-3.1 — presumably copied from an earlier config;
# confirm before reuse.
output_dir: experiments/train/checkpoints/mistral_alpaca_sft_sample
overwrite_cache: true
overwrite_output_dir: true
packing: true
per_device_train_batch_size: 16
plot_loss: true
preprocessing_num_workers: 16
push_to_db: true
push_to_hub: true
report_to: wandb
run_name: llama3_alpaca_sft_sample
save_strategy: epoch
stage: sft
template: alpaca
val_size: 0.05
warmup_ratio: 0.1