liuylhf committed on
Commit 786ef4b
1 Parent(s): 1ac42e8

Model save

README.md CHANGED
@@ -2,7 +2,6 @@
 license: apache-2.0
 library_name: peft
 tags:
-- axolotl
 - generated_from_trainer
 base_model: mistralai/Mixtral-8x7B-Instruct-v0.1
 model-index:
@@ -27,10 +26,7 @@ chat_template: inst
 dataset_prepared_path: last_run_prepared
 datasets:
 - conversation: mistral
-  path: 497b7e7687f94b09b0e633b679727a04/./data/with_function_response/function_not_used_training.jsonl
-  type: sharegpt
-- conversation: mistral
-  path: 497b7e7687f94b09b0e633b679727a04/./data/with_function_response/function_used_training.jsonl
+  path: dd7ba3a8030a4c7382d51a5d894f5cb4/./data/with_function_response/function_used_training.jsonl
   type: sharegpt
 debug: null
 deepspeed: null
@@ -69,7 +65,7 @@ model_config:
 model_type: AutoModelForCausalLM
 num_epochs: 1
 optimizer: paged_adamw_8bit
-output_dir: 497b7e7687f94b09b0e633b679727a04/model
+output_dir: dd7ba3a8030a4c7382d51a5d894f5cb4/model
 pad_to_sequence_len: true
 resume_from_checkpoint: null
 sample_packing: true
@@ -93,9 +89,7 @@ xformers_attention: null
 
 # mixtral-remove-negative-data
 
-This model is a fine-tuned version of [mistralai/Mixtral-8x7B-Instruct-v0.1](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1) on the None dataset.
-It achieves the following results on the evaluation set:
-- Loss: 0.0990
+This model is a fine-tuned version of [mistralai/Mixtral-8x7B-Instruct-v0.1](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1) on an unknown dataset.
 
 ## Model description
 
@@ -128,18 +122,6 @@ The following hyperparameters were used during training:
 - lr_scheduler_warmup_steps: 10
 - num_epochs: 1
 
-### Training results
-
-| Training Loss | Epoch | Step | Validation Loss |
-|:-------------:|:-----:|:----:|:---------------:|
-| 4.0851 | 0.0 | 1 | 4.1447 |
-| 0.1574 | 0.2 | 40 | 0.1537 |
-| 0.1149 | 0.4 | 80 | 0.1155 |
-| 0.1084 | 0.6 | 120 | 0.1053 |
-| 0.1099 | 0.8 | 160 | 0.1002 |
-| 0.1054 | 1.0 | 200 | 0.0990 |
-
-
 ### Framework versions
 
 - PEFT 0.8.2
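
The model card above describes a PEFT (LoRA) adapter trained on top of Mixtral-8x7B-Instruct. As a minimal sketch of how such an adapter could be loaded for inference, assuming the adapter repo id below (a placeholder for whatever repository this commit belongs to):

```python
# Hedged sketch: load a LoRA adapter on top of the Mixtral base model with PEFT.
# The adapter repo id is a placeholder; replace it with the actual repository.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "mistralai/Mixtral-8x7B-Instruct-v0.1"
adapter_id = "your-username/mixtral-remove-negative-data"  # placeholder

tokenizer = AutoTokenizer.from_pretrained(base_id)
base_model = AutoModelForCausalLM.from_pretrained(base_id, device_map="auto")
model = PeftModel.from_pretrained(base_model, adapter_id)

prompt = "[INST] What is the capital of France? [/INST]"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```
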
adapter_config.json CHANGED
@@ -19,10 +19,10 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "v_proj",
     "k_proj",
+    "q_proj",
     "o_proj",
-    "q_proj"
+    "v_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_rslora": false
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e38812772ac55f93a5c705bc028f6bda448c0ea78a32f7b9be1d647caaba5224
+oid sha256:bcde8c449728c8b36cbcf988c205f5d346e3b20fe4e8d76c2ca2bedabc7bd473
 size 109086416
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6d41dbd958da6f59f7b0c8f21d54fd6a4ca27c71d3c6d51e8942b99a2c60ebf9
+oid sha256:1a210d73acd9f49ee5b760356c7364b1d174e69f267224756748f7698346a851
 size 5624
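
Both binary files above are tracked via Git LFS, so the diffs only change the sha256 oid in the pointer file. As a minimal sketch of verifying a downloaded artifact against such a pointer (the local path is a placeholder):

```python
# Hedged sketch: check a downloaded file against the sha256 oid recorded in its
# Git LFS pointer, e.g. the adapter_model.safetensors pointer shown above.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file in chunks so large checkpoints fit in constant memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

expected = "bcde8c449728c8b36cbcf988c205f5d346e3b20fe4e8d76c2ca2bedabc7bd473"
actual = sha256_of("adapter_model.safetensors")  # placeholder local path
print("match" if actual == expected else "mismatch")
```
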