diff --git a/README.md b/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..9a2b5e5c9b00d9f99f75b30747fed573d35668c1
--- /dev/null
+++ b/README.md
@@ -0,0 +1,134 @@
+---
+license: apache-2.0
+base_model: mistralai/Mistral-7B-v0.1
+tags:
+- generated_from_trainer
+model-index:
+- name: out
+ results: []
+---
+
+
+
+[](https://github.com/OpenAccess-AI-Collective/axolotl)
+See axolotl config
+
+axolotl version: `0.4.0`
+```yaml
+base_model: mistralai/Mistral-7B-v0.1
+model_type: MistralForCausalLM
+tokenizer_type: LlamaTokenizer
+
+load_in_8bit: false
+load_in_4bit: false
+strict: false
+
+datasets:
+ - path: mhenrichsen/alpaca_2k_test
+ type: alpaca
+dataset_prepared_path:
+val_set_size: 0.05
+output_dir: ./out
+
+sequence_len: 8192
+sample_packing: true
+pad_to_sequence_len: true
+eval_sample_packing: false
+
+wandb_project:
+wandb_entity:
+wandb_watch:
+wandb_name:
+wandb_log_model:
+
+gradient_accumulation_steps: 4
+micro_batch_size: 2
+num_epochs: 4
+optimizer: adamw_bnb_8bit
+lr_scheduler: cosine
+learning_rate: 0.000005
+
+train_on_inputs: false
+group_by_length: false
+bf16: auto
+fp16:
+tf32: false
+
+gradient_checkpointing: true
+early_stopping_patience:
+resume_from_checkpoint:
+local_rank:
+logging_steps: 1
+xformers_attention:
+flash_attention: true
+
+warmup_steps: 10
+evals_per_epoch: 4
+eval_table_size:
+eval_max_new_tokens: 128
+saves_per_epoch: 1
+debug:
+deepspeed:
+weight_decay: 0.0
+fsdp:
+fsdp_config:
+special_tokens:
+
+```
+
+
+
+# out
+
+This model is a fine-tuned version of [mistralai/Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1) on the None dataset.
+It achieves the following results on the evaluation set:
+- Loss: 0.9125
+
+## Model description
+
+More information needed
+
+## Intended uses & limitations
+
+More information needed
+
+## Training and evaluation data
+
+More information needed
+
+## Training procedure
+
+### Training hyperparameters
+
+The following hyperparameters were used during training:
+- learning_rate: 5e-06
+- train_batch_size: 2
+- eval_batch_size: 2
+- seed: 42
+- distributed_type: multi-GPU
+- num_devices: 4
+- gradient_accumulation_steps: 4
+- total_train_batch_size: 32
+- total_eval_batch_size: 8
+- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+- lr_scheduler_type: cosine
+- lr_scheduler_warmup_steps: 10
+- num_epochs: 4
+
+### Training results
+
+| Training Loss | Epoch | Step | Validation Loss |
+|:-------------:|:-----:|:----:|:---------------:|
+| 0.9711 | 0.8 | 1 | 1.0638 |
+| 0.9758 | 1.4 | 2 | 1.0348 |
+| 0.9554 | 2.2 | 3 | 0.9725 |
+| 0.9379 | 2.8 | 4 | 0.9125 |
+
+
+### Framework versions
+
+- Transformers 4.40.0.dev0
+- Pytorch 2.1.2+cu118
+- Datasets 2.18.0
+- Tokenizers 0.15.0
diff --git a/checkpoint-1/config.json b/checkpoint-1/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..483dd03fb9018116cb76b988c449d4933fbeec93
--- /dev/null
+++ b/checkpoint-1/config.json
@@ -0,0 +1,26 @@
+{
+ "_name_or_path": "mistralai/Mistral-7B-v0.1",
+ "architectures": [
+ "MistralForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "hidden_act": "silu",
+ "hidden_size": 4096,
+ "initializer_range": 0.02,
+ "intermediate_size": 14336,
+ "max_position_embeddings": 32768,
+ "model_type": "mistral",
+ "num_attention_heads": 32,
+ "num_hidden_layers": 32,
+ "num_key_value_heads": 8,
+ "rms_norm_eps": 1e-05,
+ "rope_theta": 10000.0,
+ "sliding_window": 4096,
+ "tie_word_embeddings": false,
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.40.0.dev0",
+ "use_cache": false,
+ "vocab_size": 32000
+}
diff --git a/checkpoint-1/generation_config.json b/checkpoint-1/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..16dd90acbcc482b30661bf1c48c719fec177f4a8
--- /dev/null
+++ b/checkpoint-1/generation_config.json
@@ -0,0 +1,7 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 1,
+ "do_sample": true,
+ "eos_token_id": 2,
+ "transformers_version": "4.40.0.dev0"
+}
diff --git a/checkpoint-1/model-00001-of-00003.safetensors b/checkpoint-1/model-00001-of-00003.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..4a3bd65cf27ee9ae14e51525e9d81efe65118ee8
--- /dev/null
+++ b/checkpoint-1/model-00001-of-00003.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:65597bcc8f709a6795dbdeac42c6a0c57e3b9a5dd77679988455bae99d64c6f0
+size 4943162336
diff --git a/checkpoint-1/model-00002-of-00003.safetensors b/checkpoint-1/model-00002-of-00003.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..924d3dc21ef36288d0e774ee30af29cda592a561
--- /dev/null
+++ b/checkpoint-1/model-00002-of-00003.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3fdbb434faaa08d15234c4ec728cd05b887f510b6a5e5418a4171a47f8b906ec
+size 4999819336
diff --git a/checkpoint-1/model-00003-of-00003.safetensors b/checkpoint-1/model-00003-of-00003.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..2ccbf9911f2e7f478cf353232b40fbc92ba59d17
--- /dev/null
+++ b/checkpoint-1/model-00003-of-00003.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9bcf56354ec0c68b5f8e97b4f3b02d16af899a65b0868d6dba5a51c1b30f01cb
+size 4540516344
diff --git a/checkpoint-1/model.safetensors.index.json b/checkpoint-1/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..b349bc0ce8075a2a5df1e0210450308018d396c8
--- /dev/null
+++ b/checkpoint-1/model.safetensors.index.json
@@ -0,0 +1,298 @@
+{
+ "metadata": {
+ "total_size": 14483464192
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00003-of-00003.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00003.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.28.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.28.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.28.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.28.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.28.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.28.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.28.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.28.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.28.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.29.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.29.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.29.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.29.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.29.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.29.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.29.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.29.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.29.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.30.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.30.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.30.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.30.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.30.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.30.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.30.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.30.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.30.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.31.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.31.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.31.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.31.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.31.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.31.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.31.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.31.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.31.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.norm.weight": "model-00003-of-00003.safetensors"
+ }
+}
diff --git a/checkpoint-1/optimizer.pt b/checkpoint-1/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..a6ac8415e23e79cef0cc39d2ab0e44ae87f01b99
--- /dev/null
+++ b/checkpoint-1/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:314635da7c0a8789b8fb87803bd23689d66b7858d893d39c2457ee0ca7eb6795
+size 14512103240
diff --git a/checkpoint-1/rng_state_0.pth b/checkpoint-1/rng_state_0.pth
new file mode 100644
index 0000000000000000000000000000000000000000..c52ec8f5d66c6a990609422386c047d0c3ed3970
--- /dev/null
+++ b/checkpoint-1/rng_state_0.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:543ef05f530d40ee20b8d626b07a69b86597aca643e48897571062f973efe84f
+size 15024
diff --git a/checkpoint-1/rng_state_1.pth b/checkpoint-1/rng_state_1.pth
new file mode 100644
index 0000000000000000000000000000000000000000..7e4ae755d2c391c6486028b2ab09f40e1e5b6b3f
--- /dev/null
+++ b/checkpoint-1/rng_state_1.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7a23f732e43838ce0398d2636885ac16badbb9bcbc04d1406069ba3027bc5ae0
+size 15024
diff --git a/checkpoint-1/rng_state_2.pth b/checkpoint-1/rng_state_2.pth
new file mode 100644
index 0000000000000000000000000000000000000000..47425e0477082be97b4d8dda14c0159e7914ebb0
--- /dev/null
+++ b/checkpoint-1/rng_state_2.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e10cce960e7068b051c05e35ed6160656be9091c63f13796ac2ed7e9c84e5a72
+size 15024
diff --git a/checkpoint-1/rng_state_3.pth b/checkpoint-1/rng_state_3.pth
new file mode 100644
index 0000000000000000000000000000000000000000..adaf9621fc3ca0a14f99862b58c3bebc5b7168e3
--- /dev/null
+++ b/checkpoint-1/rng_state_3.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b6f6049e212b1df5cefc5d834afcd8cc052c73f1457449e9fe8a38d514f54078
+size 15024
diff --git a/checkpoint-1/scheduler.pt b/checkpoint-1/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..3f61910d0a815ff11df2c6cf1b2f747d205ac7ee
--- /dev/null
+++ b/checkpoint-1/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a6e130070a7cfca0a576e3986b88d148ad515c0d22f0b8b03681831bcbf20bbd
+size 1064
diff --git a/checkpoint-1/special_tokens_map.json b/checkpoint-1/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..72ecfeeb7e14d244c936169d2ed139eeae235ef1
--- /dev/null
+++ b/checkpoint-1/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "",
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/checkpoint-1/tokenizer.model b/checkpoint-1/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..8b443ef19c2a19acc3ac64fb9c3db4a72921dff6
--- /dev/null
+++ b/checkpoint-1/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+size 493443
diff --git a/checkpoint-1/tokenizer_config.json b/checkpoint-1/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..44ceae3369b580af560afc8670fe5db6f3296960
--- /dev/null
+++ b/checkpoint-1/tokenizer_config.json
@@ -0,0 +1,44 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": true,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [],
+ "bos_token": "",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "",
+ "legacy": true,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "",
+ "use_default_system_prompt": false,
+ "use_fast": true
+}
diff --git a/checkpoint-1/trainer_state.json b/checkpoint-1/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..be94c2f81173cf213528482353135d1cc3328fe7
--- /dev/null
+++ b/checkpoint-1/trainer_state.json
@@ -0,0 +1,36 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.8,
+ "eval_steps": 1,
+ "global_step": 1,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.8,
+ "grad_norm": 15.75,
+ "learning_rate": 5.000000000000001e-07,
+ "loss": 0.9711,
+ "step": 1
+ },
+ {
+ "epoch": 0.8,
+ "eval_loss": 1.0638165473937988,
+ "eval_runtime": 30.4688,
+ "eval_samples_per_second": 3.282,
+ "eval_steps_per_second": 0.427,
+ "step": 1
+ }
+ ],
+ "logging_steps": 1,
+ "max_steps": 4,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 4,
+ "save_steps": 1,
+ "total_flos": 1.1184101281234944e+16,
+ "train_batch_size": 2,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/checkpoint-1/training_args.bin b/checkpoint-1/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..64c08307f1c0ca5b09d8fb5951f3580e76d420a8
--- /dev/null
+++ b/checkpoint-1/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:de02e7bbff2d5b0589a4a17365760e0a58df0fe599195f544f24e2703a459993
+size 5752
diff --git a/checkpoint-2/config.json b/checkpoint-2/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..483dd03fb9018116cb76b988c449d4933fbeec93
--- /dev/null
+++ b/checkpoint-2/config.json
@@ -0,0 +1,26 @@
+{
+ "_name_or_path": "mistralai/Mistral-7B-v0.1",
+ "architectures": [
+ "MistralForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "hidden_act": "silu",
+ "hidden_size": 4096,
+ "initializer_range": 0.02,
+ "intermediate_size": 14336,
+ "max_position_embeddings": 32768,
+ "model_type": "mistral",
+ "num_attention_heads": 32,
+ "num_hidden_layers": 32,
+ "num_key_value_heads": 8,
+ "rms_norm_eps": 1e-05,
+ "rope_theta": 10000.0,
+ "sliding_window": 4096,
+ "tie_word_embeddings": false,
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.40.0.dev0",
+ "use_cache": false,
+ "vocab_size": 32000
+}
diff --git a/checkpoint-2/generation_config.json b/checkpoint-2/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..16dd90acbcc482b30661bf1c48c719fec177f4a8
--- /dev/null
+++ b/checkpoint-2/generation_config.json
@@ -0,0 +1,7 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 1,
+ "do_sample": true,
+ "eos_token_id": 2,
+ "transformers_version": "4.40.0.dev0"
+}
diff --git a/checkpoint-2/model-00001-of-00003.safetensors b/checkpoint-2/model-00001-of-00003.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..fda1c7308ab1520271003854554ae77d743e13d3
--- /dev/null
+++ b/checkpoint-2/model-00001-of-00003.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6fcb06479b00cc476efc80e76d894b38a65a615619c764fc4e2097641e87e9ff
+size 4943162336
diff --git a/checkpoint-2/model-00002-of-00003.safetensors b/checkpoint-2/model-00002-of-00003.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..ba8d23c1082d11fc1b7153ea3ad962109281b0da
--- /dev/null
+++ b/checkpoint-2/model-00002-of-00003.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b686b335effab3843fc23d06f6ed3f9fea02b33b2dd70feddef62bff51c8b91e
+size 4999819336
diff --git a/checkpoint-2/model-00003-of-00003.safetensors b/checkpoint-2/model-00003-of-00003.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..2f0e34950ce74e19a2f97c6cd76078796a3feafa
--- /dev/null
+++ b/checkpoint-2/model-00003-of-00003.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:79281e0487c807d2adae1b4624396a0ad2f5fd4d45f8191b053eedd167df8506
+size 4540516344
diff --git a/checkpoint-2/model.safetensors.index.json b/checkpoint-2/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..b349bc0ce8075a2a5df1e0210450308018d396c8
--- /dev/null
+++ b/checkpoint-2/model.safetensors.index.json
@@ -0,0 +1,298 @@
+{
+ "metadata": {
+ "total_size": 14483464192
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00003-of-00003.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00003.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.28.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.28.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.28.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.28.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.28.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.28.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.28.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.28.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.28.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.29.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.29.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.29.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.29.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.29.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.29.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.29.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.29.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.29.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.30.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.30.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.30.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.30.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.30.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.30.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.30.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.30.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.30.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.31.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.31.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.31.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.31.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.31.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.31.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.31.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.31.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.31.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.norm.weight": "model-00003-of-00003.safetensors"
+ }
+}
diff --git a/checkpoint-2/optimizer.pt b/checkpoint-2/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..13af1c19f29a1e32e722758f8ad91f50832477ef
--- /dev/null
+++ b/checkpoint-2/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9dfdaa49d2c88a3564c84b130eae6cbebea941e41555733ac1aa2c6ec2b172d3
+size 14512103240
diff --git a/checkpoint-2/rng_state_0.pth b/checkpoint-2/rng_state_0.pth
new file mode 100644
index 0000000000000000000000000000000000000000..98b311637714347fea3f1576e11420705a781eff
--- /dev/null
+++ b/checkpoint-2/rng_state_0.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8044e4c53158c210a17648ba8f2dc2d25a25bbfc55f686015542618eb652a33e
+size 15024
diff --git a/checkpoint-2/rng_state_1.pth b/checkpoint-2/rng_state_1.pth
new file mode 100644
index 0000000000000000000000000000000000000000..1729c081c7ba525bd3eb7f8153f2321acc57e46b
--- /dev/null
+++ b/checkpoint-2/rng_state_1.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4cd85d7fa425e7888c973f1c2985ac15ca21b5e6171fe140a401c2bc75ca46ff
+size 15024
diff --git a/checkpoint-2/rng_state_2.pth b/checkpoint-2/rng_state_2.pth
new file mode 100644
index 0000000000000000000000000000000000000000..db505ad4459dd88be94876a144ed43ff79a906fc
--- /dev/null
+++ b/checkpoint-2/rng_state_2.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d7915667371a58f1598639e0d1c20a0c59c783c14580cd040a6631eb4ea2311e
+size 15024
diff --git a/checkpoint-2/rng_state_3.pth b/checkpoint-2/rng_state_3.pth
new file mode 100644
index 0000000000000000000000000000000000000000..c068b948e60e4fb203cf25886ffdbb9e9ea88752
--- /dev/null
+++ b/checkpoint-2/rng_state_3.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:35dd78929ad7f0fbf37fdb1284e8edf0424350f6e6ce1cd5a3ee78979af3d3cb
+size 15024
diff --git a/checkpoint-2/scheduler.pt b/checkpoint-2/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..e9d5126998275db55fd74a36ee40354e62663a74
--- /dev/null
+++ b/checkpoint-2/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f27a2c20aeca06a3d851659ef1eaae71795ab50ff838697417d0b941f2228592
+size 1064
diff --git a/checkpoint-2/special_tokens_map.json b/checkpoint-2/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..72ecfeeb7e14d244c936169d2ed139eeae235ef1
--- /dev/null
+++ b/checkpoint-2/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "",
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/checkpoint-2/tokenizer.model b/checkpoint-2/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..8b443ef19c2a19acc3ac64fb9c3db4a72921dff6
--- /dev/null
+++ b/checkpoint-2/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+size 493443
diff --git a/checkpoint-2/tokenizer_config.json b/checkpoint-2/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..44ceae3369b580af560afc8670fe5db6f3296960
--- /dev/null
+++ b/checkpoint-2/tokenizer_config.json
@@ -0,0 +1,44 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": true,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [],
+ "bos_token": "<s>",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "</s>",
+ "legacy": true,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "</s>",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "<unk>",
+ "use_default_system_prompt": false,
+ "use_fast": true
+}
diff --git a/checkpoint-2/trainer_state.json b/checkpoint-2/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..8b7e56772d23f097ec91261843e796e5d09fb0d8
--- /dev/null
+++ b/checkpoint-2/trainer_state.json
@@ -0,0 +1,51 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 1.4,
+ "eval_steps": 1,
+ "global_step": 2,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.8,
+ "grad_norm": 15.75,
+ "learning_rate": 5.000000000000001e-07,
+ "loss": 0.9711,
+ "step": 1
+ },
+ {
+ "epoch": 0.8,
+ "eval_loss": 1.0638165473937988,
+ "eval_runtime": 30.4688,
+ "eval_samples_per_second": 3.282,
+ "eval_steps_per_second": 0.427,
+ "step": 1
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 15.8125,
+ "learning_rate": 1.0000000000000002e-06,
+ "loss": 0.9758,
+ "step": 2
+ },
+ {
+ "epoch": 1.4,
+ "eval_loss": 1.034848690032959,
+ "eval_runtime": 30.4612,
+ "eval_samples_per_second": 3.283,
+ "eval_steps_per_second": 0.427,
+ "step": 2
+ }
+ ],
+ "logging_steps": 1,
+ "max_steps": 4,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 4,
+ "save_steps": 1,
+ "total_flos": 2.236820256246989e+16,
+ "train_batch_size": 2,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/checkpoint-2/training_args.bin b/checkpoint-2/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..64c08307f1c0ca5b09d8fb5951f3580e76d420a8
--- /dev/null
+++ b/checkpoint-2/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:de02e7bbff2d5b0589a4a17365760e0a58df0fe599195f544f24e2703a459993
+size 5752
diff --git a/checkpoint-3/config.json b/checkpoint-3/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..483dd03fb9018116cb76b988c449d4933fbeec93
--- /dev/null
+++ b/checkpoint-3/config.json
@@ -0,0 +1,26 @@
+{
+ "_name_or_path": "mistralai/Mistral-7B-v0.1",
+ "architectures": [
+ "MistralForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "hidden_act": "silu",
+ "hidden_size": 4096,
+ "initializer_range": 0.02,
+ "intermediate_size": 14336,
+ "max_position_embeddings": 32768,
+ "model_type": "mistral",
+ "num_attention_heads": 32,
+ "num_hidden_layers": 32,
+ "num_key_value_heads": 8,
+ "rms_norm_eps": 1e-05,
+ "rope_theta": 10000.0,
+ "sliding_window": 4096,
+ "tie_word_embeddings": false,
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.40.0.dev0",
+ "use_cache": false,
+ "vocab_size": 32000
+}
diff --git a/checkpoint-3/generation_config.json b/checkpoint-3/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..16dd90acbcc482b30661bf1c48c719fec177f4a8
--- /dev/null
+++ b/checkpoint-3/generation_config.json
@@ -0,0 +1,7 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 1,
+ "do_sample": true,
+ "eos_token_id": 2,
+ "transformers_version": "4.40.0.dev0"
+}
diff --git a/checkpoint-3/model-00001-of-00003.safetensors b/checkpoint-3/model-00001-of-00003.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..6b65e9266061d3d2001a2bd22aa1f26a34e710eb
--- /dev/null
+++ b/checkpoint-3/model-00001-of-00003.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:61a85d0fdf1d00249abc83a72b6d10f82b0b25965bd51160f9d84cbc0c8924a1
+size 4943162336
diff --git a/checkpoint-3/model-00002-of-00003.safetensors b/checkpoint-3/model-00002-of-00003.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..2a0fe61140ff6fcf939ce663aa8ba05b717b3ba9
--- /dev/null
+++ b/checkpoint-3/model-00002-of-00003.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1f40c7b66f99207960b320c4ddf8b4369946f6c90f466b7c586eee2b7456d1ca
+size 4999819336
diff --git a/checkpoint-3/model-00003-of-00003.safetensors b/checkpoint-3/model-00003-of-00003.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..66cf2a224766c3ff164e2e7daccd2dc413555cb1
--- /dev/null
+++ b/checkpoint-3/model-00003-of-00003.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7b5fc294451425d77f76a1ed3261825addfb606836aa45e6f8bfa97938f8f6a9
+size 4540516344
diff --git a/checkpoint-3/model.safetensors.index.json b/checkpoint-3/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..b349bc0ce8075a2a5df1e0210450308018d396c8
--- /dev/null
+++ b/checkpoint-3/model.safetensors.index.json
@@ -0,0 +1,298 @@
+{
+ "metadata": {
+ "total_size": 14483464192
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00003-of-00003.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00003.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.28.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.28.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.28.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.28.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.28.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.28.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.28.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.28.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.28.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.29.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.29.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.29.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.29.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.29.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.29.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.29.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.29.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.29.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.30.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.30.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.30.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.30.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.30.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.30.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.30.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.30.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.30.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.31.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.31.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.31.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.31.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.31.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.31.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.31.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.31.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.31.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.norm.weight": "model-00003-of-00003.safetensors"
+ }
+}
diff --git a/checkpoint-3/optimizer.pt b/checkpoint-3/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..e6e55a8f2a9feb04d684804cc74602b288564923
--- /dev/null
+++ b/checkpoint-3/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:506f13c507ffe20d300e6d676b620c093a3987c4c03d5750a1a65662be4ef49d
+size 14512103240
diff --git a/checkpoint-3/rng_state_0.pth b/checkpoint-3/rng_state_0.pth
new file mode 100644
index 0000000000000000000000000000000000000000..d926276efddc5c076860537e83872f7dbd55e7d7
--- /dev/null
+++ b/checkpoint-3/rng_state_0.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e7d74de51245105e1fbf57a6707ef3538b353952485508f6e2f8f74dc5d479d4
+size 15024
diff --git a/checkpoint-3/rng_state_1.pth b/checkpoint-3/rng_state_1.pth
new file mode 100644
index 0000000000000000000000000000000000000000..3d56821ed5d46ed9da9ff33efcd7e652dfd7ce1b
--- /dev/null
+++ b/checkpoint-3/rng_state_1.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0617c9eb6cf7df57b2e0bb53cfe17c05f0910de56fe5b14427fe39ab54a44782
+size 15024
diff --git a/checkpoint-3/rng_state_2.pth b/checkpoint-3/rng_state_2.pth
new file mode 100644
index 0000000000000000000000000000000000000000..fcdc411d25a2ca81352f507f4efefa922a794707
--- /dev/null
+++ b/checkpoint-3/rng_state_2.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ed68a365057022897d9645ee60902a77102f43215dcdf2ddd5d3842b6a8446d8
+size 15024
diff --git a/checkpoint-3/rng_state_3.pth b/checkpoint-3/rng_state_3.pth
new file mode 100644
index 0000000000000000000000000000000000000000..182355d208a87c8848a8896294f2edb7aa2f4875
--- /dev/null
+++ b/checkpoint-3/rng_state_3.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:63ebaa0c302cadbdfcd9f8ee2289e35ecf9c9fc8c9968fc0c05f100dac20c6b9
+size 15024
diff --git a/checkpoint-3/scheduler.pt b/checkpoint-3/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..161479a9403525cf0446aa6ab570d7a58c0b7d36
--- /dev/null
+++ b/checkpoint-3/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5695e7cd6d1d1131dde4c2351675933967b4089747e57e568d38d4a6a40afd43
+size 1064
diff --git a/checkpoint-3/special_tokens_map.json b/checkpoint-3/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..72ecfeeb7e14d244c936169d2ed139eeae235ef1
--- /dev/null
+++ b/checkpoint-3/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "",
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/checkpoint-3/tokenizer.model b/checkpoint-3/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..8b443ef19c2a19acc3ac64fb9c3db4a72921dff6
--- /dev/null
+++ b/checkpoint-3/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+size 493443
diff --git a/checkpoint-3/tokenizer_config.json b/checkpoint-3/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..44ceae3369b580af560afc8670fe5db6f3296960
--- /dev/null
+++ b/checkpoint-3/tokenizer_config.json
@@ -0,0 +1,44 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": true,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [],
+ "bos_token": "",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "",
+ "legacy": true,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "",
+ "use_default_system_prompt": false,
+ "use_fast": true
+}
diff --git a/checkpoint-3/trainer_state.json b/checkpoint-3/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..812aeb302b267139bb7c824dade6cb01e38bf92d
--- /dev/null
+++ b/checkpoint-3/trainer_state.json
@@ -0,0 +1,66 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 2.2,
+ "eval_steps": 1,
+ "global_step": 3,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.8,
+ "grad_norm": 15.75,
+ "learning_rate": 5.000000000000001e-07,
+ "loss": 0.9711,
+ "step": 1
+ },
+ {
+ "epoch": 0.8,
+ "eval_loss": 1.0638165473937988,
+ "eval_runtime": 30.4688,
+ "eval_samples_per_second": 3.282,
+ "eval_steps_per_second": 0.427,
+ "step": 1
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 15.8125,
+ "learning_rate": 1.0000000000000002e-06,
+ "loss": 0.9758,
+ "step": 2
+ },
+ {
+ "epoch": 1.4,
+ "eval_loss": 1.034848690032959,
+ "eval_runtime": 30.4612,
+ "eval_samples_per_second": 3.283,
+ "eval_steps_per_second": 0.427,
+ "step": 2
+ },
+ {
+ "epoch": 2.2,
+ "grad_norm": 13.3125,
+ "learning_rate": 1.5e-06,
+ "loss": 0.9554,
+ "step": 3
+ },
+ {
+ "epoch": 2.2,
+ "eval_loss": 0.9725089073181152,
+ "eval_runtime": 30.4647,
+ "eval_samples_per_second": 3.282,
+ "eval_steps_per_second": 0.427,
+ "step": 3
+ }
+ ],
+ "logging_steps": 1,
+ "max_steps": 4,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 4,
+ "save_steps": 1,
+ "total_flos": 3.355230384370483e+16,
+ "train_batch_size": 2,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/checkpoint-3/training_args.bin b/checkpoint-3/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..64c08307f1c0ca5b09d8fb5951f3580e76d420a8
--- /dev/null
+++ b/checkpoint-3/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:de02e7bbff2d5b0589a4a17365760e0a58df0fe599195f544f24e2703a459993
+size 5752
diff --git a/checkpoint-4/config.json b/checkpoint-4/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..483dd03fb9018116cb76b988c449d4933fbeec93
--- /dev/null
+++ b/checkpoint-4/config.json
@@ -0,0 +1,26 @@
+{
+ "_name_or_path": "mistralai/Mistral-7B-v0.1",
+ "architectures": [
+ "MistralForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "hidden_act": "silu",
+ "hidden_size": 4096,
+ "initializer_range": 0.02,
+ "intermediate_size": 14336,
+ "max_position_embeddings": 32768,
+ "model_type": "mistral",
+ "num_attention_heads": 32,
+ "num_hidden_layers": 32,
+ "num_key_value_heads": 8,
+ "rms_norm_eps": 1e-05,
+ "rope_theta": 10000.0,
+ "sliding_window": 4096,
+ "tie_word_embeddings": false,
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.40.0.dev0",
+ "use_cache": false,
+ "vocab_size": 32000
+}
diff --git a/checkpoint-4/generation_config.json b/checkpoint-4/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..16dd90acbcc482b30661bf1c48c719fec177f4a8
--- /dev/null
+++ b/checkpoint-4/generation_config.json
@@ -0,0 +1,7 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 1,
+ "do_sample": true,
+ "eos_token_id": 2,
+ "transformers_version": "4.40.0.dev0"
+}
diff --git a/checkpoint-4/model-00001-of-00003.safetensors b/checkpoint-4/model-00001-of-00003.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..afa568c4f429411a180eaa464a7761bd718e8556
--- /dev/null
+++ b/checkpoint-4/model-00001-of-00003.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5cd3b22b4e0597fa8056bea8816f29e5ff63082c0c771718a22bd3b7e18d0cc7
+size 4943162336
diff --git a/checkpoint-4/model-00002-of-00003.safetensors b/checkpoint-4/model-00002-of-00003.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..b49c2e74987aa6c0cb130972b34b0a175bd8d402
--- /dev/null
+++ b/checkpoint-4/model-00002-of-00003.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:928ba8efa3816cd870d562fcef2afad00c6c0b451f120446f1938e00baaf83d2
+size 4999819336
diff --git a/checkpoint-4/model-00003-of-00003.safetensors b/checkpoint-4/model-00003-of-00003.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..cfde9415a4bebc5f94ffad4fed9d807974838ab1
--- /dev/null
+++ b/checkpoint-4/model-00003-of-00003.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5a1a31332cb9f3150b02b7d9bd6afb58cf6dcae814f876ae669ccf7a8afbb8ee
+size 4540516344
diff --git a/checkpoint-4/model.safetensors.index.json b/checkpoint-4/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..b349bc0ce8075a2a5df1e0210450308018d396c8
--- /dev/null
+++ b/checkpoint-4/model.safetensors.index.json
@@ -0,0 +1,298 @@
+{
+ "metadata": {
+ "total_size": 14483464192
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00003-of-00003.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00003.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.28.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.28.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.28.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.28.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.28.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.28.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.28.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.28.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.28.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.29.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.29.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.29.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.29.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.29.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.29.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.29.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.29.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.29.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.30.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.30.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.30.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.30.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.30.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.30.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.30.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.30.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.30.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.31.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.31.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.31.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.31.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.31.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "model.layers.31.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.31.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.31.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.31.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "model.norm.weight": "model-00003-of-00003.safetensors"
+ }
+}
diff --git a/checkpoint-4/optimizer.pt b/checkpoint-4/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..70dc0d2c7f58f88a4a14d2594c00ae1d657304d0
--- /dev/null
+++ b/checkpoint-4/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aab7105bb28965b8013e054cdcea2a2d19c7e7a395ef2082fe2fca1e8c0bf01a
+size 14512103240
diff --git a/checkpoint-4/rng_state_0.pth b/checkpoint-4/rng_state_0.pth
new file mode 100644
index 0000000000000000000000000000000000000000..89e481fa6bcddcac4e3e7741104095957c20101b
--- /dev/null
+++ b/checkpoint-4/rng_state_0.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7f2439da621f14c22b4f733e91bfc9de6b506d28d7b8d6f3eaca2e0b4f24c078
+size 15024
diff --git a/checkpoint-4/rng_state_1.pth b/checkpoint-4/rng_state_1.pth
new file mode 100644
index 0000000000000000000000000000000000000000..18841fb76f19e1506eb6434be070163da28b13d4
--- /dev/null
+++ b/checkpoint-4/rng_state_1.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c9e3fb386557f376b8946af5b8c91f9418f374dddb2ad9da4868b1ef16778c32
+size 15024
diff --git a/checkpoint-4/rng_state_2.pth b/checkpoint-4/rng_state_2.pth
new file mode 100644
index 0000000000000000000000000000000000000000..6737644855c82850db90b4961982e68a85904c89
--- /dev/null
+++ b/checkpoint-4/rng_state_2.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dc7774d06045635bece9e960378fdc6913bf7bbbc903444cc570d1ca6ac25645
+size 15024
diff --git a/checkpoint-4/rng_state_3.pth b/checkpoint-4/rng_state_3.pth
new file mode 100644
index 0000000000000000000000000000000000000000..8b55550772c705eb979b1eca22edd82c386c2e99
--- /dev/null
+++ b/checkpoint-4/rng_state_3.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d98c54a80a914fecf43d06ea81432499f46e70664f1d04651bf339163e30fa9e
+size 15024
diff --git a/checkpoint-4/scheduler.pt b/checkpoint-4/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..d2dab90cbb99b9700e199cb8caf6c44017bb2060
--- /dev/null
+++ b/checkpoint-4/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b291921a8745ff595a683f927d6f701599edf6b56254d56c2156f8040beb64d4
+size 1064
diff --git a/checkpoint-4/special_tokens_map.json b/checkpoint-4/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..72ecfeeb7e14d244c936169d2ed139eeae235ef1
--- /dev/null
+++ b/checkpoint-4/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "</s>",
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/checkpoint-4/tokenizer.model b/checkpoint-4/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..8b443ef19c2a19acc3ac64fb9c3db4a72921dff6
--- /dev/null
+++ b/checkpoint-4/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+size 493443
diff --git a/checkpoint-4/tokenizer_config.json b/checkpoint-4/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..44ceae3369b580af560afc8670fe5db6f3296960
--- /dev/null
+++ b/checkpoint-4/tokenizer_config.json
@@ -0,0 +1,44 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": true,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [],
+ "bos_token": "<s>",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "</s>",
+ "legacy": true,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "</s>",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "<unk>",
+ "use_default_system_prompt": false,
+ "use_fast": true
+}
diff --git a/checkpoint-4/trainer_state.json b/checkpoint-4/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..cbaa24e2566872e9fb66dc263fdbe9c9c7c32293
--- /dev/null
+++ b/checkpoint-4/trainer_state.json
@@ -0,0 +1,81 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 2.8,
+ "eval_steps": 1,
+ "global_step": 4,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.8,
+ "grad_norm": 15.75,
+ "learning_rate": 5.000000000000001e-07,
+ "loss": 0.9711,
+ "step": 1
+ },
+ {
+ "epoch": 0.8,
+ "eval_loss": 1.0638165473937988,
+ "eval_runtime": 30.4688,
+ "eval_samples_per_second": 3.282,
+ "eval_steps_per_second": 0.427,
+ "step": 1
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 15.8125,
+ "learning_rate": 1.0000000000000002e-06,
+ "loss": 0.9758,
+ "step": 2
+ },
+ {
+ "epoch": 1.4,
+ "eval_loss": 1.034848690032959,
+ "eval_runtime": 30.4612,
+ "eval_samples_per_second": 3.283,
+ "eval_steps_per_second": 0.427,
+ "step": 2
+ },
+ {
+ "epoch": 2.2,
+ "grad_norm": 13.3125,
+ "learning_rate": 1.5e-06,
+ "loss": 0.9554,
+ "step": 3
+ },
+ {
+ "epoch": 2.2,
+ "eval_loss": 0.9725089073181152,
+ "eval_runtime": 30.4647,
+ "eval_samples_per_second": 3.282,
+ "eval_steps_per_second": 0.427,
+ "step": 3
+ },
+ {
+ "epoch": 2.8,
+ "grad_norm": 9.75,
+ "learning_rate": 2.0000000000000003e-06,
+ "loss": 0.9379,
+ "step": 4
+ },
+ {
+ "epoch": 2.8,
+ "eval_loss": 0.9125163555145264,
+ "eval_runtime": 30.462,
+ "eval_samples_per_second": 3.283,
+ "eval_steps_per_second": 0.427,
+ "step": 4
+ }
+ ],
+ "logging_steps": 1,
+ "max_steps": 4,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 4,
+ "save_steps": 1,
+ "total_flos": 4.473640512493978e+16,
+ "train_batch_size": 2,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/checkpoint-4/training_args.bin b/checkpoint-4/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..64c08307f1c0ca5b09d8fb5951f3580e76d420a8
--- /dev/null
+++ b/checkpoint-4/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:de02e7bbff2d5b0589a4a17365760e0a58df0fe599195f544f24e2703a459993
+size 5752
diff --git a/config.json b/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..483dd03fb9018116cb76b988c449d4933fbeec93
--- /dev/null
+++ b/config.json
@@ -0,0 +1,26 @@
+{
+ "_name_or_path": "mistralai/Mistral-7B-v0.1",
+ "architectures": [
+ "MistralForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "hidden_act": "silu",
+ "hidden_size": 4096,
+ "initializer_range": 0.02,
+ "intermediate_size": 14336,
+ "max_position_embeddings": 32768,
+ "model_type": "mistral",
+ "num_attention_heads": 32,
+ "num_hidden_layers": 32,
+ "num_key_value_heads": 8,
+ "rms_norm_eps": 1e-05,
+ "rope_theta": 10000.0,
+ "sliding_window": 4096,
+ "tie_word_embeddings": false,
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.40.0.dev0",
+ "use_cache": false,
+ "vocab_size": 32000
+}
diff --git a/generation_config.json b/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..16dd90acbcc482b30661bf1c48c719fec177f4a8
--- /dev/null
+++ b/generation_config.json
@@ -0,0 +1,7 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 1,
+ "do_sample": true,
+ "eos_token_id": 2,
+ "transformers_version": "4.40.0.dev0"
+}
diff --git a/pytorch_model-00001-of-00003.bin b/pytorch_model-00001-of-00003.bin
new file mode 100644
index 0000000000000000000000000000000000000000..851d664ec9bdf1578735e8d443b1d409f33f2845
--- /dev/null
+++ b/pytorch_model-00001-of-00003.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c225696eb24eece98026af9a302e7fa53cb2abe08b6d07b49bc7fe9086cb3400
+size 4943185632
diff --git a/pytorch_model-00002-of-00003.bin b/pytorch_model-00002-of-00003.bin
new file mode 100644
index 0000000000000000000000000000000000000000..0d21052130f07b950f4088589202cefa71c7312a
--- /dev/null
+++ b/pytorch_model-00002-of-00003.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:77c188a44dd9013fdad116f3bddcd764bd09150c8f657457fcebb0e4b1d6e449
+size 4999844744
diff --git a/pytorch_model-00003-of-00003.bin b/pytorch_model-00003-of-00003.bin
new file mode 100644
index 0000000000000000000000000000000000000000..6f5d4a04caa2143aeed0b727cf06ff2ab8fccce8
--- /dev/null
+++ b/pytorch_model-00003-of-00003.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d5073b02bf3c6b874c5b2dddfbbb0b2136ec99d9ec8626af16f94c3d504eebb4
+size 4540537414
diff --git a/pytorch_model.bin.index.json b/pytorch_model.bin.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..76fccc201c63903bcc555d59944b099e5cc7d336
--- /dev/null
+++ b/pytorch_model.bin.index.json
@@ -0,0 +1,298 @@
+{
+ "metadata": {
+ "total_size": 14483464192
+ },
+ "weight_map": {
+ "lm_head.weight": "pytorch_model-00003-of-00003.bin",
+ "model.embed_tokens.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.0.input_layernorm.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.0.mlp.down_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.0.mlp.gate_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.0.mlp.up_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.0.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.0.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.0.self_attn.o_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.0.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.0.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.1.input_layernorm.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.1.mlp.down_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.1.mlp.gate_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.1.mlp.up_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.1.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.1.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.1.self_attn.o_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.1.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.1.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.10.input_layernorm.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.10.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.10.mlp.gate_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.10.mlp.up_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.10.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.10.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.10.self_attn.o_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.10.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.10.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.11.input_layernorm.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.11.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.11.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.11.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.11.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.11.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.11.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.11.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.11.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.12.input_layernorm.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.12.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.12.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.12.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.12.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.12.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.12.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.12.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.12.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.13.input_layernorm.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.13.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.13.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.13.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.13.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.13.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.13.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.13.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.13.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.14.input_layernorm.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.14.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.14.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.14.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.14.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.14.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.14.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.14.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.14.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.15.input_layernorm.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.15.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.15.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.15.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.15.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.15.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.15.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.15.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.15.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.16.input_layernorm.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.16.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.16.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.16.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.16.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.16.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.16.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.16.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.16.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.17.input_layernorm.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.17.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.17.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.17.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.17.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.17.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.17.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.17.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.17.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.18.input_layernorm.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.18.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.18.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.18.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.18.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.18.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.18.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.18.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.18.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.19.input_layernorm.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.19.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.19.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.19.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.19.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.19.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.19.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.19.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.19.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.2.input_layernorm.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.2.mlp.down_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.2.mlp.gate_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.2.mlp.up_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.2.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.2.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.2.self_attn.o_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.2.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.2.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.20.input_layernorm.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.20.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.20.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.20.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.20.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.20.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.20.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.20.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.20.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.21.input_layernorm.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.21.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.21.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.21.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.21.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.21.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.21.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.21.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.21.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.22.input_layernorm.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.22.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.22.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.22.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.22.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.22.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.22.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.22.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.22.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
+ "model.layers.23.input_layernorm.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.23.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.23.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.23.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.23.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.23.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.23.self_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.23.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.23.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.24.input_layernorm.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.24.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.24.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.24.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.24.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.24.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.24.self_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.24.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.24.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.25.input_layernorm.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.25.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.25.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.25.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.25.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.25.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.25.self_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.25.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.25.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.26.input_layernorm.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.26.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.26.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.26.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.26.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.26.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.26.self_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.26.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.26.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.27.input_layernorm.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.27.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.27.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.27.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.27.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.27.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.27.self_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.27.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.27.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.28.input_layernorm.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.28.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.28.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.28.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.28.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.28.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.28.self_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.28.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.28.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.29.input_layernorm.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.29.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.29.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.29.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.29.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.29.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.29.self_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.29.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.29.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.3.input_layernorm.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.3.mlp.down_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.3.mlp.gate_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.3.mlp.up_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.3.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.3.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.3.self_attn.o_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.3.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.3.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.30.input_layernorm.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.30.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.30.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.30.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.30.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.30.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.30.self_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.30.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.30.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.31.input_layernorm.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.31.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.31.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.31.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.31.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.31.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.31.self_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.31.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.31.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
+ "model.layers.4.input_layernorm.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.4.mlp.down_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.4.mlp.gate_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.4.mlp.up_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.4.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.4.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.4.self_attn.o_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.4.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.4.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.5.input_layernorm.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.5.mlp.down_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.5.mlp.gate_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.5.mlp.up_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.5.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.5.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.5.self_attn.o_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.5.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.5.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.6.input_layernorm.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.6.mlp.down_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.6.mlp.gate_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.6.mlp.up_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.6.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.6.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.6.self_attn.o_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.6.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.6.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.7.input_layernorm.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.7.mlp.down_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.7.mlp.gate_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.7.mlp.up_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.7.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.7.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.7.self_attn.o_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.7.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.7.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.8.input_layernorm.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.8.mlp.down_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.8.mlp.gate_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.8.mlp.up_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.8.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.8.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.8.self_attn.o_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.8.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.8.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.9.input_layernorm.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.9.mlp.down_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.9.mlp.gate_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.9.mlp.up_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.9.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.9.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.9.self_attn.o_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.9.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.layers.9.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
+ "model.norm.weight": "pytorch_model-00003-of-00003.bin"
+ }
+}
diff --git a/runs/Apr16_19-49-20_f789f17568eb/events.out.tfevents.1713296962.f789f17568eb.11363.0 b/runs/Apr16_19-49-20_f789f17568eb/events.out.tfevents.1713296962.f789f17568eb.11363.0
new file mode 100644
index 0000000000000000000000000000000000000000..fff3fdc16887be4d958c3505e973c92f2fe0dcd9
--- /dev/null
+++ b/runs/Apr16_19-49-20_f789f17568eb/events.out.tfevents.1713296962.f789f17568eb.11363.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:43d31566ead83ea2c6f4f785fd9e6293847dcdad0e9505d6aee48708a7639700
+size 7726
diff --git a/special_tokens_map.json b/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..72ecfeeb7e14d244c936169d2ed139eeae235ef1
--- /dev/null
+++ b/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "</s>",
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/tokenizer.model b/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..8b443ef19c2a19acc3ac64fb9c3db4a72921dff6
--- /dev/null
+++ b/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+size 493443
diff --git a/tokenizer_config.json b/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..44ceae3369b580af560afc8670fe5db6f3296960
--- /dev/null
+++ b/tokenizer_config.json
@@ -0,0 +1,44 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": true,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [],
+ "bos_token": "<s>",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "</s>",
+ "legacy": true,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "</s>",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "<unk>",
+ "use_default_system_prompt": false,
+ "use_fast": true
+}