deuswoof committed
Commit 782358c
Parent: 5cd140f

Training in progress, step 10

Files changed (5)
  1. README.md +13 -0
  2. adapter_model.bin +1 -1
  3. adapter_model.safetensors +2 -2
  4. config.json +18 -18
  5. training_args.bin +1 -1
README.md CHANGED
@@ -52,6 +52,18 @@ The following `bitsandbytes` quantization config was used during training:
 - bnb_4bit_use_double_quant: True
 - bnb_4bit_compute_dtype: bfloat16
 
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+
 The following `bitsandbytes` quantization config was used during training:
 - quant_method: bitsandbytes
 - load_in_8bit: False
@@ -69,5 +81,6 @@ The following `bitsandbytes` quantization config was used during training:
 - PEFT 0.5.0
 - PEFT 0.5.0
 - PEFT 0.5.0
+- PEFT 0.5.0
 
 - PEFT 0.5.0
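Note: the `bitsandbytes` settings listed in the README block above map one-to-one onto `transformers`' `BitsAndBytesConfig`. A minimal sketch of the equivalent object — not code from this repo, assuming the transformers 4.34.x API that this commit's config.json records:

```python
import torch
from transformers import BitsAndBytesConfig

# Mirrors the README's quantization block: 4-bit NF4 with double
# quantization and bfloat16 compute; the int8 fields stay at the
# default values the README lists.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                      # load_in_4bit: True (load_in_8bit: False)
    bnb_4bit_quant_type="nf4",              # bnb_4bit_quant_type: nf4
    bnb_4bit_use_double_quant=True,         # bnb_4bit_use_double_quant: True
    bnb_4bit_compute_dtype=torch.bfloat16,  # bnb_4bit_compute_dtype: bfloat16
    llm_int8_threshold=6.0,                 # llm_int8_threshold: 6.0
    llm_int8_skip_modules=None,             # llm_int8_skip_modules: None
    llm_int8_enable_fp32_cpu_offload=False,
    llm_int8_has_fp16_weight=False,
)
```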
adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:75b7cbced5aaf55b4676459233174fca5aa8e63d7ee9515a2bafbbbdd72274e2
+oid sha256:e877cf672df3d94e5a16765f9ec9ae2990abf7eb5a339fb68e217c35f38207d1
 size 100733709
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3ceda947757e8e863957b7c0ac6282dda8be63d6aff7d21ca866b79d3024273c
-size 261131840
+oid sha256:0dc4f031b070a3eeff0f3bc486b8b7591e15859c95ec15b6d98269d64ced9671
+size 100690288
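The two `adapter_model` entries above are git-lfs pointer files: each stores only the object's sha256 `oid` and byte `size`, not the weights themselves. A hedged sketch, independent of this repo, of checking a downloaded blob against its pointer (both file paths are hypothetical placeholders):

```python
import hashlib
from pathlib import Path

def verify_lfs_object(pointer_path: str, blob_path: str) -> bool:
    """Compare a blob's sha256 and size with its git-lfs pointer fields."""
    # Pointer lines look like "oid sha256:<hex>" and "size <bytes>".
    fields = dict(
        line.split(" ", 1)
        for line in Path(pointer_path).read_text().splitlines()
        if line.strip()
    )
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])
    data = Path(blob_path).read_bytes()
    return hashlib.sha256(data).hexdigest() == expected_oid and len(data) == expected_size

# e.g. verify_lfs_object("adapter_model.safetensors.pointer", "adapter_model.safetensors")
```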
config.json CHANGED
@@ -1,33 +1,33 @@
 {
-  "_name_or_path": "tiiuae/falcon-7b",
-  "alibi": false,
+  "_name_or_path": "tiiuae/falcon-rw-1b",
+  "alibi": true,
   "apply_residual_connection_post_layernorm": false,
   "architectures": [
     "FalconForCausalLM"
   ],
   "attention_dropout": 0.0,
   "auto_map": {
-    "AutoConfig": "tiiuae/falcon-7b--configuration_falcon.FalconConfig",
-    "AutoModel": "tiiuae/falcon-7b--modeling_falcon.FalconModel",
-    "AutoModelForCausalLM": "tiiuae/falcon-7b--modeling_falcon.FalconForCausalLM",
-    "AutoModelForQuestionAnswering": "tiiuae/falcon-7b--modeling_falcon.FalconForQuestionAnswering",
-    "AutoModelForSequenceClassification": "tiiuae/falcon-7b--modeling_falcon.FalconForSequenceClassification",
-    "AutoModelForTokenClassification": "tiiuae/falcon-7b--modeling_falcon.FalconForTokenClassification"
+    "AutoConfig": "tiiuae/falcon-rw-1b--configuration_falcon.FalconConfig",
+    "AutoModel": "tiiuae/falcon-rw-1b--modeling_falcon.FalconModel",
+    "AutoModelForCausalLM": "tiiuae/falcon-rw-1b--modeling_falcon.FalconForCausalLM",
+    "AutoModelForQuestionAnswering": "tiiuae/falcon-rw-1b--modeling_falcon.FalconForQuestionAnswering",
+    "AutoModelForSequenceClassification": "tiiuae/falcon-rw-1b--modeling_falcon.FalconForSequenceClassification",
+    "AutoModelForTokenClassification": "tiiuae/falcon-rw-1b--modeling_falcon.FalconForTokenClassification"
   },
-  "bias": false,
-  "bos_token_id": 11,
-  "eos_token_id": 11,
+  "bias": true,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
   "hidden_dropout": 0.0,
-  "hidden_size": 4544,
+  "hidden_size": 2048,
   "initializer_range": 0.02,
   "layer_norm_epsilon": 1e-05,
   "model_type": "falcon",
-  "multi_query": true,
+  "multi_query": false,
   "new_decoder_architecture": false,
-  "num_attention_heads": 71,
-  "num_hidden_layers": 32,
-  "num_kv_heads": 71,
-  "parallel_attn": true,
+  "num_attention_heads": 32,
+  "num_hidden_layers": 24,
+  "num_kv_heads": 32,
+  "parallel_attn": false,
   "quantization_config": {
     "bnb_4bit_compute_dtype": "bfloat16",
     "bnb_4bit_quant_type": "nf4",
@@ -43,5 +43,5 @@
   "torch_dtype": "bfloat16",
   "transformers_version": "4.34.1",
   "use_cache": false,
-  "vocab_size": 65024
+  "vocab_size": 50304
 }
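The config.json change swaps the base model from tiiuae/falcon-7b to tiiuae/falcon-rw-1b, which is why the ALiBi flag, head counts, layer count, and vocab size all change. A minimal sketch, not taken from this repo, of loading that base in the same 4-bit setup and attaching the trained adapter with PEFT 0.5.0; the adapter repo id is a hypothetical placeholder:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import PeftModel

# Same 4-bit settings as the quantization_config block in config.json.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)

base = AutoModelForCausalLM.from_pretrained(
    "tiiuae/falcon-rw-1b",
    quantization_config=bnb_config,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-rw-1b")

# Attach the adapter weights from this commit's checkpoint;
# "your-username/your-adapter" stands in for the actual repo id.
model = PeftModel.from_pretrained(base, "your-username/your-adapter")
```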
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:46dfa040a93c7f61a97e86a998ade17bf1b307d0aa4af935c733a7c7ad2cd6b3
+oid sha256:e952a35dab9b7558195f77eab50ba42c6f635e30bceddcfbeac8e0ede9843ec8
 size 4091
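training_args.bin is the pickled `TrainingArguments` object that the `transformers` `Trainer` saves alongside checkpoints; here its hash changed while the size stayed at 4091 bytes. A short sketch for inspecting it, assuming a torch version whose `torch.load` still unpickles arbitrary objects by default (only unpickle files you trust):

```python
import torch

# The file is written with torch.save(TrainingArguments, ...), so
# torch.load gives back the TrainingArguments object for inspection.
args = torch.load("training_args.bin")
print(type(args).__name__)  # e.g. "TrainingArguments"
print(args.per_device_train_batch_size, args.learning_rate)
```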