deboramachadoandrade committed on
Commit 3d5a261
1 Parent(s): ef59d94

ai-maker-space/mistral-7binstruct-summary-100s

README.md CHANGED
@@ -20,7 +20,7 @@ should probably proofread and complete it, then remove this comment. -->
 
  This model is a fine-tuned version of [mistralai/Mistral-7B-Instruct-v0.2](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2) on the generator dataset.
  It achieves the following results on the evaluation set:
- - Loss: 1.4551
+ - Loss: 1.4866
 
  ## Model description
 
@@ -52,14 +52,14 @@ The following hyperparameters were used during training:
 
  | Training Loss | Epoch | Step | Validation Loss |
  |:-------------:|:-----:|:----:|:---------------:|
- | 1.3263 | 0.22 | 25 | 1.4551 |
- | 1.4328 | 0.43 | 50 | 1.4551 |
- | 1.4559 | 0.65 | 75 | 1.4551 |
- | 1.4446 | 0.86 | 100 | 1.4551 |
- | 1.4565 | 1.08 | 125 | 1.4551 |
- | 1.3922 | 1.29 | 150 | 1.4551 |
- | 1.3697 | 1.51 | 175 | 1.4551 |
- | 1.4504 | 1.72 | 200 | 1.4551 |
+ | 1.3299 | 0.22 | 25 | 1.4606 |
+ | 1.4385 | 0.43 | 50 | 1.4657 |
+ | 1.4628 | 0.65 | 75 | 1.4560 |
+ | 1.4457 | 0.86 | 100 | 1.4538 |
+ | 1.4249 | 1.08 | 125 | 1.4624 |
+ | 1.2826 | 1.29 | 150 | 1.4834 |
+ | 1.264 | 1.51 | 175 | 1.4860 |
+ | 1.3431 | 1.72 | 200 | 1.4866 |
 
 
  ### Framework versions
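The README now reports an eval loss of 1.4866 after the hyperparameter change. Below is a minimal sketch of loading this adapter for inference, assuming the commit's repo (ai-maker-space/mistral-7binstruct-summary-100s) hosts a PEFT/LoRA adapter on top of mistralai/Mistral-7B-Instruct-v0.2 rather than merged weights:

```python
# Minimal sketch: attach the fine-tuned LoRA adapter to the base model.
# Assumes the adapter repo from this commit and access to the base checkpoint.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "mistralai/Mistral-7B-Instruct-v0.2"
adapter_id = "ai-maker-space/mistral-7binstruct-summary-100s"

tokenizer = AutoTokenizer.from_pretrained(base_id)
base_model = AutoModelForCausalLM.from_pretrained(
    base_id, torch_dtype=torch.bfloat16, device_map="auto"
)
# Load the adapter weights published in this commit on top of the base model.
model = PeftModel.from_pretrained(base_model, adapter_id)

prompt = "[INST] Summarize: The quick brown fox jumps over the lazy dog. [/INST]"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=100)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```

The [INST] ... [/INST] wrapper follows the Mistral-Instruct chat format; the exact prompt text is only an example.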
adapter_config.json CHANGED
@@ -9,13 +9,13 @@
  "layers_pattern": null,
  "layers_to_transform": null,
  "loftq_config": {},
- "lora_alpha": 16,
+ "lora_alpha": 64,
  "lora_dropout": 0.1,
  "megatron_config": null,
  "megatron_core": "megatron.core",
  "modules_to_save": null,
  "peft_type": "LORA",
- "r": 8,
+ "r": 32,
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e44ce263e6fd885f50d82ca515b9325375b43ee36ededb75acf161ce88bc2e41
- size 48
+ oid sha256:1d4aa27b3696e05f1cd612b74e2ca087497f0a8fd4837b42f2ed27cb0ffbb917
+ size 54549712
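The old pointer referenced a 48-byte placeholder, while the new object is roughly 54.5 MB, consistent with actual adapter weights rather than a stub. A small sketch, assuming a local copy of the file, for listing the tensors it contains:

```python
# Sanity check: list tensor names and shapes in the updated adapter file
# without loading everything into memory. The local path is hypothetical.
from safetensors import safe_open

path = "adapter_model.safetensors"  # local copy of the file updated in this commit
with safe_open(path, framework="pt") as f:
    for name in f.keys():
        print(name, tuple(f.get_tensor(name).shape))
```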
runs/Mar04_13-34-29_f7ad4747e4a8/events.out.tfevents.1709559269.f7ad4747e4a8.1838.7 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dfe0d813db65c8fb322778466fac3e1795c6fa626405e2fe39202ead055d2a45
+ size 11716
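The commit also adds a TensorBoard event log for this run. A sketch for inspecting it with the tensorboard package; the scalar tag names are not visible in the diff, so the one in the commented loop is an assumption:

```python
# Read the newly added event file and list which scalar series were logged.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

run_dir = "runs/Mar04_13-34-29_f7ad4747e4a8"  # directory added in this commit
ea = EventAccumulator(run_dir)
ea.Reload()
print(ea.Tags()["scalars"])                 # discover available scalar tags
# for event in ea.Scalars("train/loss"):    # tag name is an assumption
#     print(event.step, event.value)
```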
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:48d34dcd61de275b075414ab76f9e08f65d632ad46566f29d790e5d2391daac8
+ oid sha256:41fbfff89d59b82561e7a45340838521acd56f185733ed361ecc7b073de3f28b
  size 4920