model_family: llama2-7b
LoRA:
  r: 0          # rank 0 leaves LoRA disabled; this run fine-tunes all weights (hence "noLORA" in save_dir)
  alpha: 32
  dropout: 0.05
data_path: locuslab/TOFU
split: full
batch_size: 4
gradient_accumulation_steps: 4   # effective batch size per device: 4 x 4 = 16
num_epochs: 5
lr: 1.0e-05
# ${...} are OmegaConf-style interpolations, resolved against the keys above at load time.
save_dir: paper_models/final_ft_noLORA_5_epochs_inst_lr${lr}_${model_family}_${split}
weight_decay: 0.01
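
The `${lr}`, `${model_family}`, and `${split}` placeholders in `save_dir` use OmegaConf interpolation syntax, so the output directory is derived from the other keys rather than hard-coded. A minimal sketch of inspecting the resolved value with OmegaConf follows; the file path `config/finetune.yaml` is an assumption, so adjust it to wherever this config lives in your checkout:

```python
from omegaconf import OmegaConf

# Load the fine-tuning config (path is an assumption; adjust to your layout).
cfg = OmegaConf.load("config/finetune.yaml")

# ${lr}, ${model_family}, and ${split} resolve against the top-level keys on access.
print(cfg.save_dir)
# Expected with the values above:
# paper_models/final_ft_noLORA_5_epochs_inst_lr1e-05_llama2-7b_full
```

Keeping the hyperparameters in the directory name this way makes each run self-describing: changing `lr` or `split` automatically yields a distinct `save_dir`, so checkpoints from different configurations never overwrite each other.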