Training in progress, step 50
adapter_config.json
ADDED
@@ -0,0 +1,31 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": "alexsherstinsky/Mistral-7B-v0.1-sharded",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 32,
+  "lora_dropout": 0.1,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 8,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "q_proj",
+    "gate_proj",
+    "o_proj",
+    "v_proj",
+    "up_proj",
+    "down_proj",
+    "k_proj"
+  ],
+  "task_type": "CAUSAL_LM"
+}
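For reference, a minimal sketch (not part of this commit) of the peft.LoraConfig that adapter_config.json above corresponds to; the argument names follow the PEFT library and the values are taken directly from the file:

# Sketch: rebuilding the LoRA configuration from adapter_config.json above.
from peft import LoraConfig, TaskType

lora_config = LoraConfig(
    r=8,                            # "r": 8
    lora_alpha=32,                  # "lora_alpha": 32
    lora_dropout=0.1,               # "lora_dropout": 0.1
    bias="none",                    # "bias": "none"
    task_type=TaskType.CAUSAL_LM,   # "task_type": "CAUSAL_LM"
    target_modules=[                # all attention and MLP projections listed above
        "q_proj", "k_proj", "v_proj", "o_proj",
        "gate_proj", "up_proj", "down_proj",
    ],
)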
adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:48cf7446d807b64dad8efc7550fd67339cbc56a914fb48d13d339c9089cd1a6f
+size 42002584
runs/Jan22_03-50-11_8b33d498b7ab/events.out.tfevents.1705895557.8b33d498b7ab.1716.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cd47c8f60ddb0780c81988e6c31f28a903c27d528325886b42c2fba8649ed19c
+size 13537
training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7073d0d1d81a911e268ce528dc2687feb0d1ead9c517d9b3d5409eeeede95781
+size 6264
training_config.json
ADDED
@@ -0,0 +1,93 @@
+{
+  "experiment_key": "base",
+  "save_safetensors": true,
+  "max_shard_size": "10GB",
+  "local_rank": 0,
+  "use_gradient_checkpointing": true,
+  "trainer_key": "lm",
+  "force_fp32": false,
+  "force_fp16": false,
+  "from_gptq": false,
+  "huggingface_hub_token": null,
+  "single_gpu": true,
+  "master_port": 9994,
+  "deepspeed_stage": "stage_2",
+  "deepspeed_config_path": null,
+  "fsdp_strategy": "",
+  "fsdp_offload": true,
+  "seed": 42,
+  "stabilize": false,
+  "norm_fp32": false,
+  "path_to_env_file": "./.env",
+  "prepare_dataset": true,
+  "lora_hub_model_id": null,
+  "lora_model_local_path": null,
+  "fused_model_local_path": null,
+  "fuse_after_training": false,
+  "quantization_dataset_id": null,
+  "quantization_max_samples": 1024,
+  "quantized_model_path": "./quantized_model/",
+  "quantized_hub_model_id": null,
+  "quantized_hub_private_repo": true,
+  "dataset_key": "soda",
+  "train_local_path_to_data": "./train.jsonl",
+  "eval_local_path_to_data": null,
+  "shuffle": true,
+  "max_eval_samples": 1000,
+  "add_eval_to_train_if_no_path": false,
+  "tokenizer_name_or_path": null,
+  "tokenizer_use_fast": null,
+  "tokenizer_padding_side": null,
+  "collator_key": "lm",
+  "max_length": 2048,
+  "model_name_or_path": "alexsherstinsky/Mistral-7B-v0.1-sharded",
+  "push_to_hub_bos_add_bos_token": false,
+  "use_flash_attention_2": false,
+  "trust_remote_code": true,
+  "device_map": null,
+  "prepare_model_for_kbit_training": true,
+  "offload_folder": null,
+  "load_in_8bit": false,
+  "load_in_4bit": true,
+  "llm_int8_threshold": 6.0,
+  "llm_int8_has_fp16_weight": true,
+  "bnb_4bit_use_double_quant": true,
+  "bnb_4bit_quant_type": "nf4",
+  "bnb_quantize_after_model_init": false,
+  "gptq_bits": 4,
+  "gptq_group_size": 128,
+  "gptq_disable_exllama": true,
+  "apply_lora": true,
+  "lora_rank": 8,
+  "lora_alpha": 32,
+  "lora_dropout": 0.1,
+  "raw_lora_target_modules": "all",
+  "output_dir": "./outputs/",
+  "per_device_train_batch_size": 2,
+  "do_eval": false,
+  "per_device_eval_batch_size": null,
+  "gradient_accumulation_steps": 2,
+  "eval_accumulation_steps": null,
+  "eval_delay": 0,
+  "eval_steps": 1000,
+  "warmup_steps": 5,
+  "max_steps": 101,
+  "num_train_epochs": 1,
+  "learning_rate": 0.0002,
+  "max_grad_norm": 1.0,
+  "weight_decay": 0.001,
+  "label_smoothing_factor": 0.0,
+  "logging_steps": 1,
+  "save_steps": 50,
+  "save_total_limit": 1,
+  "optim": "paged_adamw_8bit",
+  "push_to_hub": true,
+  "hub_model_id": "mogaio/Mistral-2x2_8-LoRA",
+  "hub_private_repo": false,
+  "neftune_noise_alpha": null,
+  "project_name": null,
+  "report_to_wandb": false,
+  "wandb_api_key": null,
+  "wandb_project": "Mistral-hh-rlhf-fine-tuning",
+  "wandb_entity": "Mistral-hh-rlhf"
+}
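For reference, a minimal sketch (not part of this commit) of how this LoRA checkpoint could be loaded back onto the base model for inference, mirroring the 4-bit settings in training_config.json; the repo ids and quantization flags come from the configs above, while the compute dtype and device_map are assumptions:

# Sketch: loading the base model in 4-bit and applying the trained adapter.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import PeftModel

base_id = "alexsherstinsky/Mistral-7B-v0.1-sharded"  # "model_name_or_path"
adapter_id = "mogaio/Mistral-2x2_8-LoRA"             # "hub_model_id"

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                      # "load_in_4bit": true
    bnb_4bit_use_double_quant=True,         # "bnb_4bit_use_double_quant": true
    bnb_4bit_quant_type="nf4",              # "bnb_4bit_quant_type": "nf4"
    bnb_4bit_compute_dtype=torch.bfloat16,  # assumption; not specified in the configs
)

tokenizer = AutoTokenizer.from_pretrained(base_id)
base_model = AutoModelForCausalLM.from_pretrained(
    base_id,
    quantization_config=bnb_config,
    device_map="auto",        # assumption; "device_map" is null in the config
    trust_remote_code=True,   # "trust_remote_code": true
)
model = PeftModel.from_pretrained(base_model, adapter_id)  # applies the LoRA weights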