```yaml
cutoff_len: 1024
dataset: alpaca_en
dataset_dir: data
do_train: true
finetuning_type: lora
flash_attn: auto
fp16: true
gradient_accumulation_steps: 8
learning_rate: 5.0e-05
logging_steps: 5
lora_alpha: 16
lora_dropout: 0
lora_rank: 8
lora_target: q_proj,v_proj
lr_scheduler_type: cosine
max_grad_norm: 1.0
max_samples: 100000
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
num_train_epochs: 3.0
optim: adamw_torch
output_dir: saves/LLaMA3-8B/lora/train_2024-05-11-18-42-27
packing: false
per_device_train_batch_size: 2
report_to: none
save_steps: 100
stage: sft
template: default
warmup_steps: 0
```
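This appears to be a LLaMA-Factory training configuration (the `output_dir` pattern matches what its web UI exports); assuming that context, such a file is typically launched with `llamafactory-cli train <config>.yaml`. As a quick sanity check on the hyperparameters, here is a minimal Python sketch that derives the effective batch size and optimizer-step counts they imply. The file name `train_config.yaml`, the `num_gpus` value, and the ~52k sample count for `alpaca_en` are assumptions, not part of the config above.

```python
# Minimal sketch: compute derived training quantities from the config.
# Assumes PyYAML is installed and the YAML above is saved as train_config.yaml.
import math
import yaml

with open("train_config.yaml") as f:  # hypothetical file name
    cfg = yaml.safe_load(f)

num_gpus = 1          # assumption: single-GPU run; adjust for your setup
num_samples = 52000   # assumption: rough size of alpaca_en
num_samples = min(num_samples, cfg["max_samples"])  # max_samples caps the dataset

# Effective batch size = per-device batch * grad accumulation * number of devices.
effective_batch = (
    cfg["per_device_train_batch_size"]
    * cfg["gradient_accumulation_steps"]
    * num_gpus
)

steps_per_epoch = math.ceil(num_samples / effective_batch)
total_steps = int(steps_per_epoch * cfg["num_train_epochs"])

print(f"effective batch size: {effective_batch}")
print(f"optimizer steps per epoch: {steps_per_epoch}")
print(f"total optimizer steps: {total_steps}")
```

With a single GPU, the effective batch size works out to 2 × 8 = 16, and since `warmup_steps` is 0, the cosine schedule starts decaying from 5.0e-05 at the first optimizer step.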