cutoff_len: 1024
dataset: truth_dev_0716
dataset_dir: data
do_eval: true
finetuning_type: full
flash_attn: auto
max_new_tokens: 512
max_samples: 100000
model_name_or_path: saves/LLaMA3-8B-Chat/full/train_2024-07-16-09-46-28_llama3
output_dir: saves/LLaMA3-8B-Chat/full/eval_2024-07-16-09-46-28
per_device_eval_batch_size: 2
predict_with_generate: true
preprocessing_num_workers: 16
quantization_method: bitsandbytes
stage: sft
temperature: 0.95
template: llama3
top_p: 0.7