# deepseek-llm-7b-chat-sa-v0.1 / trainer_config.yaml
bf16: true
cutoff_len: 4096
dataset: LangGPT_community,LangGPT_alpaca,LangGPT_seed
dataset_dir: /datas/wangm/LLM4LangGPT
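# (assuming the LLaMA-Factory config convention, which these keys match, the
#  dataset names above are resolved via a dataset_info.json under dataset_dir)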
do_train: true
finetuning_type: lora
flash_attn: auto
gradient_accumulation_steps: 8
learning_rate: 5.0e-05
logging_steps: 5
lora_alpha: 16
lora_dropout: 0
lora_rank: 8
lora_target: q_proj,v_proj
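# lora_alpha / lora_rank = 16 / 8 = 2 is the scaling applied to the low-rank
# update in the standard LoRA formulation; q_proj and v_proj are the attention
# query/value projections, the usual minimal LoRA targets for LLaMA-style models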
lr_scheduler_type: cosine
max_grad_norm: 1.0
max_samples: 100000
model_name_or_path: deepseek-ai/deepseek-llm-7b-chat
num_train_epochs: 5.0
optim: adamw_torch
output_dir: /datas/wangm/LLM4LangGPT/output/deepseek-llm-7b-chat
packing: false
per_device_train_batch_size: 2
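# together with gradient_accumulation_steps: 8 above, this gives an effective
# batch size of 2 x 8 = 16 sequences per device (times the number of devices)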
report_to: none
save_steps: 100
stage: sft
template: deepseek
warmup_steps: 0
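# a minimal launch sketch, assuming the LLaMA-Factory CLI (these keys follow its
# trainer config format); the exact command may differ in other setups:
#   llamafactory-cli train trainer_config.yaml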