See axolotl config

axolotl version: 0.5.2

```yaml
base_model: meta-llama/Llama-3.1-8B
model_type: LlamaForCausalLM
tokenizer_type: AutoTokenizer
load_in_8bit: false
load_in_4bit: false
strict: false
datasets:
  - path: murugeshmarvel/iter4_qad_set4
    type: alpaca:instruct
    train_on_split: train
test_datasets:
  - path: murugeshmarvel/iter4_qad_set4
    type: alpaca:instruct
    split: test
unfrozen_parameters:
- ^lm_head.weight$
- ^model.embed_tokens.weight$
# input_layernorm layers
- model.layers.0.input_layernorm
- model.layers.1.input_layernorm
- model.layers.2.input_layernorm
- model.layers.3.input_layernorm
- model.layers.4.input_layernorm
- model.layers.5.input_layernorm
- model.layers.6.input_layernorm
- model.layers.7.input_layernorm
- model.layers.8.input_layernorm
- model.layers.9.input_layernorm
- model.layers.10.input_layernorm
- model.layers.11.input_layernorm
- model.layers.12.input_layernorm
- model.layers.13.input_layernorm
- model.layers.14.input_layernorm
- model.layers.15.input_layernorm
# lm_head layers
# mlp.down_proj layers
- model.layers.0.mlp.down_proj
- model.layers.1.mlp.down_proj
- model.layers.30.mlp.down_proj
- model.layers.2.mlp.down_proj
- model.layers.21.mlp.down_proj
- model.layers.29.mlp.down_proj
- model.layers.22.mlp.down_proj
- model.layers.5.mlp.down_proj
- model.layers.4.mlp.down_proj
- model.layers.20.mlp.down_proj
- model.layers.23.mlp.down_proj
- model.layers.19.mlp.down_proj
- model.layers.3.mlp.down_proj
- model.layers.17.mlp.down_proj
- model.layers.6.mlp.down_proj
- model.layers.31.mlp.down_proj
# mlp.gate_proj layers
- model.layers.1.mlp.gate_proj
- model.layers.2.mlp.gate_proj
- model.layers.3.mlp.gate_proj
- model.layers.4.mlp.gate_proj
- model.layers.0.mlp.gate_proj
- model.layers.25.mlp.gate_proj
- model.layers.26.mlp.gate_proj
- model.layers.5.mlp.gate_proj
- model.layers.24.mlp.gate_proj
- model.layers.28.mlp.gate_proj
- model.layers.23.mlp.gate_proj
- model.layers.27.mlp.gate_proj
- model.layers.21.mlp.gate_proj
- model.layers.22.mlp.gate_proj
- model.layers.29.mlp.gate_proj
- model.layers.20.mlp.gate_proj
# mlp.up_proj layers
- model.layers.4.mlp.up_proj
- model.layers.3.mlp.up_proj
- model.layers.0.mlp.up_proj
- model.layers.7.mlp.up_proj
- model.layers.5.mlp.up_proj
- model.layers.6.mlp.up_proj
- model.layers.2.mlp.up_proj
- model.layers.1.mlp.up_proj
- model.layers.8.mlp.up_proj
- model.layers.14.mlp.up_proj
- model.layers.12.mlp.up_proj
- model.layers.9.mlp.up_proj
- model.layers.15.mlp.up_proj
- model.layers.17.mlp.up_proj
- model.layers.13.mlp.up_proj
- model.layers.19.mlp.up_proj
# model.embed_tokens layers
# model.norm layers
# post_attention_layernorm layers
- model.layers.0.post_attention_layernorm
- model.layers.1.post_attention_layernorm
- model.layers.2.post_attention_layernorm
- model.layers.3.post_attention_layernorm
- model.layers.4.post_attention_layernorm
- model.layers.5.post_attention_layernorm
- model.layers.6.post_attention_layernorm
- model.layers.7.post_attention_layernorm
- model.layers.8.post_attention_layernorm
- model.layers.9.post_attention_layernorm
- model.layers.10.post_attention_layernorm
- model.layers.11.post_attention_layernorm
- model.layers.12.post_attention_layernorm
- model.layers.13.post_attention_layernorm
- model.layers.14.post_attention_layernorm
- model.layers.15.post_attention_layernorm
# self_attn.k_proj layers
- model.layers.29.self_attn.k_proj
- model.layers.25.self_attn.k_proj
- model.layers.23.self_attn.k_proj
- model.layers.28.self_attn.k_proj
- model.layers.21.self_attn.k_proj
- model.layers.19.self_attn.k_proj
- model.layers.22.self_attn.k_proj
- model.layers.20.self_attn.k_proj
- model.layers.24.self_attn.k_proj
- model.layers.31.self_attn.k_proj
- model.layers.27.self_attn.k_proj
- model.layers.26.self_attn.k_proj
- model.layers.17.self_attn.k_proj
- model.layers.11.self_attn.k_proj
- model.layers.18.self_attn.k_proj
- model.layers.14.self_attn.k_proj
# self_attn.o_proj layers
- model.layers.14.self_attn.o_proj
- model.layers.7.self_attn.o_proj
- model.layers.5.self_attn.o_proj
- model.layers.11.self_attn.o_proj
- model.layers.6.self_attn.o_proj
- model.layers.24.self_attn.o_proj
- model.layers.9.self_attn.o_proj
- model.layers.13.self_attn.o_proj
- model.layers.10.self_attn.o_proj
- model.layers.12.self_attn.o_proj
- model.layers.8.self_attn.o_proj
- model.layers.25.self_attn.o_proj
- model.layers.21.self_attn.o_proj
- model.layers.23.self_attn.o_proj
- model.layers.15.self_attn.o_proj
- model.layers.16.self_attn.o_proj
# self_attn.q_proj layers
- model.layers.8.self_attn.q_proj
- model.layers.13.self_attn.q_proj
- model.layers.9.self_attn.q_proj
- model.layers.14.self_attn.q_proj
- model.layers.10.self_attn.q_proj
- model.layers.11.self_attn.q_proj
- model.layers.0.self_attn.q_proj
- model.layers.15.self_attn.q_proj
- model.layers.1.self_attn.q_proj
- model.layers.6.self_attn.q_proj
- model.layers.5.self_attn.q_proj
- model.layers.7.self_attn.q_proj
- model.layers.12.self_attn.q_proj
- model.layers.16.self_attn.q_proj
- model.layers.17.self_attn.q_proj
- model.layers.26.self_attn.q_proj
# self_attn.v_proj layers
- model.layers.26.self_attn.v_proj
- model.layers.17.self_attn.v_proj
- model.layers.3.self_attn.v_proj
- model.layers.28.self_attn.v_proj
- model.layers.29.self_attn.v_proj
- model.layers.21.self_attn.v_proj
- model.layers.15.self_attn.v_proj
- model.layers.16.self_attn.v_proj
- model.layers.20.self_attn.v_proj
- model.layers.25.self_attn.v_proj
- model.layers.6.self_attn.v_proj
- model.layers.23.self_attn.v_proj
- model.layers.4.self_attn.v_proj
- model.layers.1.self_attn.v_proj
- model.layers.14.self_attn.v_proj
- model.layers.22.self_attn.v_proj
dataset_prepared_path: last_run_prepared
output_dir: ./outputs/qad_base_fft_out
sequence_len: 8192
sample_packing: true
pad_to_sequence_len: true
wandb_project: QAD_ITER4_SET4
wandb_entity:
wandb_watch:
wandb_name: QAD_ITER4_SET4_4GPU
wandb_log_model:
gradient_accumulation_steps: 8
micro_batch_size: 2
eval_batch_size: 3
num_epochs: 5
optimizer: paged_adamw_8bit
lr_scheduler: cosine
learning_rate: 1e-5
train_on_inputs: false
group_by_length: false
bf16: auto
fp16:
tf32: false
gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: true
s2_attention:
warmup_steps: 10
evals_per_epoch: 2
eval_table_size:
eval_max_new_tokens: 4096
saves_per_epoch: 4
save_total_limit: 2
debug:
deepspeed:
weight_decay: 0.05
fsdp:
fsdp_config:
special_tokens:
  pad_token: <|end_of_text|>
```
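The unfrozen_parameters list above sets up a selective full fine-tune: only the tensors whose names match one of the listed patterns stay trainable, and the rest of the network remains frozen. A config like this is typically launched with something along the lines of `accelerate launch -m axolotl.cli.train config.yaml`. The snippet below is a minimal sketch of how such name patterns can be applied in plain PyTorch; it illustrates the idea rather than axolotl's exact implementation, and the abbreviated pattern list and base model id are copied from the config above.

```python
import re

import torch
from transformers import AutoModelForCausalLM

# A few of the unfrozen_parameters patterns from the config above
# (abbreviated for illustration).
UNFROZEN_PATTERNS = [
    r"^lm_head.weight$",
    r"^model.embed_tokens.weight$",
    r"model.layers.0.input_layernorm",
    r"model.layers.0.mlp.down_proj",
    r"model.layers.14.self_attn.o_proj",
]

# Gated repository: requires accepted access terms and an authenticated HF token.
model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-3.1-8B", torch_dtype=torch.bfloat16
)

# Freeze everything, then re-enable gradients only for parameters whose fully
# qualified name matches one of the patterns (re.search, so bare substrings
# such as "model.layers.0.input_layernorm" also match the trailing ".weight").
for name, param in model.named_parameters():
    param.requires_grad = any(re.search(p, name) for p in UNFROZEN_PATTERNS)

trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
total = sum(p.numel() for p in model.parameters())
print(f"trainable parameters: {trainable:,} / {total:,}")
```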
outputs/qad_base_fft_out
This model is a fine-tuned version of meta-llama/Llama-3.1-8B on the murugeshmarvel/iter4_qad_set4 dataset. It achieves the following results on the evaluation set:
- Loss: 0.1032
Model description
More information needed
Intended uses & limitations
More information needed
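For quick experimentation, the sketch below loads the fine-tuned weights with transformers. It assumes the repository id murugeshmarvel/QAD-llama3.1-8b-iter4set4 listed for this card and a standard Alpaca instruct prompt, matching the alpaca:instruct dataset type in the config above; adjust both if your copy of the model or your prompt format differs.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_ID = "murugeshmarvel/QAD-llama3.1-8b-iter4set4"  # repo id for this card

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID, torch_dtype=torch.bfloat16, device_map="auto"
)

# Standard Alpaca instruct template (assumed to match the training format).
prompt = (
    "Below is an instruction that describes a task. "
    "Write a response that appropriately completes the request.\n\n"
    "### Instruction:\nSummarize the purpose of gradient checkpointing.\n\n"
    "### Response:\n"
)

inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
    output_ids = model.generate(**inputs, max_new_tokens=256)

# Strip the prompt tokens and decode only the generated continuation.
response = tokenizer.decode(
    output_ids[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True
)
print(response)
```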
Training and evaluation data
The model was trained on the train split of murugeshmarvel/iter4_qad_set4 in the alpaca:instruct format and evaluated on the test split of the same dataset (see the config above).
Training procedure
Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 1e-05
- train_batch_size: 2
- eval_batch_size: 3
- seed: 42
- distributed_type: multi-GPU
- num_devices: 2
- gradient_accumulation_steps: 8
- total_train_batch_size: 32
- total_eval_batch_size: 6
- optimizer: paged_adamw_8bit (betas=(0.9, 0.999), epsilon=1e-08, no additional optimizer arguments)
- lr_scheduler_type: cosine
- lr_scheduler_warmup_steps: 10
- num_epochs: 5
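The total batch sizes reported above follow directly from the per-device settings and the device count; a quick sanity check:

```python
micro_batch_size = 2       # per-device train batch size
eval_batch_size = 3        # per-device eval batch size
num_devices = 2            # GPUs in this run
grad_accum_steps = 8       # gradient_accumulation_steps

total_train_batch_size = micro_batch_size * num_devices * grad_accum_steps
total_eval_batch_size = eval_batch_size * num_devices

print(total_train_batch_size)  # 32
print(total_eval_batch_size)   # 6
```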
Training results
Training Loss | Epoch | Step | Validation Loss |
---|---|---|---|
0.1159 | 0.5018 | 102 | 0.1200 |
0.1883 | 1.0031 | 204 | 0.1116 |
0.086 | 1.5049 | 306 | 0.1093 |
0.1044 | 2.0080 | 408 | 0.1060 |
0.0909 | 2.5098 | 510 | 0.1051 |
0.0754 | 3.0123 | 612 | 0.1036 |
0.0697 | 3.5141 | 714 | 0.1034 |
0.0794 | 4.0160 | 816 | 0.1032 |
0.0839 | 4.5178 | 918 | 0.1032 |
Framework versions
- Transformers 4.46.3
- Pytorch 2.4.1+cu124
- Datasets 3.1.0
- Tokenizers 0.20.3