isaacOnline committed on
Commit 57be099
1 Parent(s): 025c34a

Training in progress, step 20

adapter_config.json CHANGED
@@ -1,7 +1,7 @@
 {
   "alpha_pattern": {},
   "auto_mapping": null,
-  "base_model_name_or_path": "meta-llama/Llama-2-7b-chat-hf",
+  "base_model_name_or_path": "mistralai/Mistral-7B-Instruct-v0.1",
   "bias": "none",
   "fan_in_fan_out": false,
   "inference_mode": true,
@@ -19,13 +19,13 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "k_proj",
     "up_proj",
-    "q_proj",
     "down_proj",
-    "gate_proj",
     "v_proj",
-    "o_proj"
+    "o_proj",
+    "gate_proj",
+    "q_proj",
+    "k_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_rslora": false
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1519d2e4b6be2c8cbb7c088ebe02623256a9b6613fec820340e772e2fb8f83c9
-size 319876032
+oid sha256:3206a900b2723ae4b7d712d2c2f5433c58bbe9b2b17591a1ea6e0c418b3e7a8a
+size 335604696
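The retrained adapter grows from 319,876,032 to 335,604,696 bytes (~5%), consistent with Mistral-7B's wider MLP (intermediate size 14336 vs. Llama-2-7B's 11008) outweighing its smaller grouped-query K/V projections at a fixed LoRA rank. A sketch of loading the new weights — the Hub repo ID below is a placeholder, not confirmed by this commit:

```python
# Sketch: load the Mistral base model and apply this LoRA adapter.
# "isaacOnline/qual_classification" is a placeholder repo ID.
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base = AutoModelForCausalLM.from_pretrained(
    "mistralai/Mistral-7B-Instruct-v0.1",
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
model = PeftModel.from_pretrained(base, "isaacOnline/qual_classification")
tokenizer = AutoTokenizer.from_pretrained("isaacOnline/qual_classification")
model.eval()
```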
qual_clasification.log CHANGED
@@ -1,83 +1,5 @@
-2024-02-09 22:49:30,776 - INFO - __main__ - Loaded Model ID: tiiuae/falcon-7b-instruct
-2024-02-09 22:49:31,999 - INFO - __main__ - Loaded LoRA Model
-2024-02-09 22:49:33,821 - INFO - __main__ - Instantiated Trainer
-2024-02-09 22:50:28,617 - INFO - __main__ - Completed fine-tuning
-2024-02-09 22:50:29,632 - INFO - __main__ - Saved model and tokenizer to machine_learning/llm_finetune_models/0
-2024-02-09 22:50:44,705 - INFO - __main__ - Saved model to hub
-2024-02-09 22:59:18,679 - INFO - __main__ - Loaded Model ID: tiiuae/falcon-7b-instruct
-2024-02-09 22:59:19,887 - INFO - __main__ - Loaded LoRA Model
-2024-02-09 22:59:21,478 - INFO - __main__ - Instantiated Trainer
-2024-02-09 22:59:36,469 - INFO - __main__ - Completed fine-tuning
-2024-02-09 22:59:38,512 - INFO - __main__ - Saved model and tokenizer to machine_learning/llm_finetune_models/0
-2024-02-09 22:59:50,968 - INFO - __main__ - Saved model to hub
-2024-02-09 23:01:29,319 - INFO - __main__ - Loaded Model ID: tiiuae/falcon-7b-instruct
-2024-02-09 23:01:30,540 - INFO - __main__ - Loaded LoRA Model
-2024-02-09 23:01:32,151 - INFO - __main__ - Instantiated Trainer
-2024-02-09 23:01:47,224 - INFO - __main__ - Completed fine-tuning
-2024-02-09 23:01:49,330 - INFO - __main__ - Saved model and tokenizer to machine_learning/llm_finetune_models/0
-2024-02-09 23:02:01,724 - INFO - __main__ - Saved model to hub
-2024-02-09 23:22:55,575 - INFO - __main__ - Loaded Model ID: tiiuae/falcon-7b-instruct
-2024-02-09 23:22:56,483 - INFO - __main__ - Loaded LoRA Model
-2024-02-09 23:22:57,125 - INFO - __main__ - Instantiated Trainer
-2024-02-09 23:23:11,695 - INFO - __main__ - Completed fine-tuning
-2024-02-09 23:23:13,671 - INFO - __main__ - Saved model and tokenizer to machine_learning/llm_finetune_models/0
-2024-02-09 23:23:34,172 - INFO - __main__ - Saved model to hub
-2024-02-09 23:25:17,080 - INFO - __main__ - Completed EM Metrics evaluation
-2024-02-09 23:51:26,608 - INFO - __main__ - Loaded Model ID: tiiuae/falcon-7b-instruct
-2024-02-09 23:51:27,524 - INFO - __main__ - Loaded LoRA Model
-2024-02-09 23:51:28,180 - INFO - __main__ - Instantiated Trainer
-2024-02-10 01:04:52,412 - INFO - __main__ - Completed fine-tuning
-2024-02-10 01:04:54,181 - INFO - __main__ - Saved model and tokenizer to machine_learning/llm_finetune_models/0
-2024-02-10 01:04:56,305 - INFO - __main__ - Saved model to hub
-2024-02-10 01:06:40,587 - INFO - __main__ - Completed EM Metrics evaluation
-2024-02-10 01:08:03,969 - INFO - __main__ - Loaded Model ID: mistralai/Mistral-7B-Instruct-v0.1
-2024-02-10 01:08:05,422 - INFO - __main__ - Loaded LoRA Model
-2024-02-10 01:08:06,024 - INFO - __main__ - Instantiated Trainer
-2024-02-10 02:20:21,347 - INFO - __main__ - Completed fine-tuning
-2024-02-10 02:20:23,577 - INFO - __main__ - Saved model and tokenizer to machine_learning/llm_finetune_models/0
-2024-02-10 02:20:26,036 - INFO - __main__ - Saved model to hub
-2024-02-10 02:22:11,906 - INFO - __main__ - Completed EM Metrics evaluation
-2024-02-10 02:23:35,322 - INFO - __main__ - Loaded Model ID: meta-llama/Llama-2-7b-chat-hf
-2024-02-10 02:23:36,493 - INFO - __main__ - Loaded LoRA Model
-2024-02-10 02:23:37,120 - INFO - __main__ - Instantiated Trainer
-2024-02-11 16:43:38,722 - INFO - __main__ - Loaded Model ID: tiiuae/falcon-7b-instruct
-2024-02-11 16:43:39,667 - INFO - __main__ - Loaded LoRA Model
-2024-02-11 16:43:40,416 - INFO - __main__ - Instantiated Trainer
-2024-02-11 16:45:54,754 - INFO - __main__ - Loaded Model ID: tiiuae/falcon-7b-instruct
-2024-02-11 16:45:55,658 - INFO - __main__ - Loaded LoRA Model
-2024-02-11 16:45:56,299 - INFO - __main__ - Instantiated Trainer
-2024-02-11 19:11:54,957 - INFO - __main__ - Completed fine-tuning
-2024-02-11 19:11:56,915 - INFO - __main__ - Saved model and tokenizer to machine_learning/llm_finetune_models/0
-2024-02-11 19:11:59,283 - INFO - __main__ - Saved model to hub
-2024-02-11 19:13:41,986 - INFO - __main__ - Completed EM Metrics evaluation
-2024-02-11 19:14:07,990 - INFO - __main__ - Loaded Model ID: tiiuae/falcon-7b-instruct
-2024-02-11 19:14:08,914 - INFO - __main__ - Loaded LoRA Model
-2024-02-11 19:14:09,545 - INFO - __main__ - Instantiated Trainer
-2024-02-12 00:06:16,122 - INFO - __main__ - Completed fine-tuning
-2024-02-12 00:06:18,016 - INFO - __main__ - Saved model and tokenizer to machine_learning/llm_finetune_models/0
-2024-02-12 00:06:20,394 - INFO - __main__ - Saved model to hub
-2024-02-12 00:08:03,435 - INFO - __main__ - Completed EM Metrics evaluation
-2024-02-12 00:09:33,450 - INFO - __main__ - Loaded Model ID: mistralai/Mistral-7B-Instruct-v0.1
-2024-02-12 00:09:34,543 - INFO - __main__ - Loaded LoRA Model
-2024-02-12 00:09:35,118 - INFO - __main__ - Instantiated Trainer
-2024-02-12 02:32:57,446 - INFO - __main__ - Completed fine-tuning
-2024-02-12 02:32:59,744 - INFO - __main__ - Saved model and tokenizer to machine_learning/llm_finetune_models/0
-2024-02-12 02:33:02,130 - INFO - __main__ - Saved model to hub
-2024-02-12 02:34:45,762 - INFO - __main__ - Completed EM Metrics evaluation
-2024-02-12 02:35:11,284 - INFO - __main__ - Loaded Model ID: mistralai/Mistral-7B-Instruct-v0.1
-2024-02-12 02:35:12,376 - INFO - __main__ - Loaded LoRA Model
-2024-02-12 02:35:12,958 - INFO - __main__ - Instantiated Trainer
-2024-02-12 07:21:35,467 - INFO - __main__ - Completed fine-tuning
-2024-02-12 07:21:37,814 - INFO - __main__ - Saved model and tokenizer to machine_learning/llm_finetune_models/0
-2024-02-12 07:21:40,237 - INFO - __main__ - Saved model to hub
-2024-02-12 07:23:23,882 - INFO - __main__ - Completed EM Metrics evaluation
-2024-02-12 07:24:49,119 - INFO - __main__ - Loaded Model ID: meta-llama/Llama-2-7b-chat-hf
-2024-02-12 07:24:50,284 - INFO - __main__ - Loaded LoRA Model
-2024-02-12 07:24:51,020 - INFO - __main__ - Instantiated Trainer
-2024-02-12 09:44:02,757 - INFO - __main__ - Completed fine-tuning
-2024-02-12 09:44:05,152 - INFO - __main__ - Saved model and tokenizer to machine_learning/llm_finetune_models/0
-2024-02-12 09:44:08,108 - INFO - __main__ - Saved model to hub
-2024-02-12 09:45:44,559 - INFO - __main__ - Completed EM Metrics evaluation
-2024-02-12 09:46:09,619 - INFO - __main__ - Loaded Model ID: meta-llama/Llama-2-7b-chat-hf
-2024-02-12 09:46:10,764 - INFO - __main__ - Loaded LoRA Model
-2024-02-12 09:46:11,386 - INFO - __main__ - Instantiated Trainer
+2024-02-12 22:26:20,315 - INFO - __main__ - Loaded Model ID: meta-llama/Llama-2-7b-chat-hf
+2024-02-12 22:26:21,959 - INFO - __main__ - Loaded LoRA Model
+2024-02-12 22:34:22,187 - INFO - __main__ - Loaded Model ID: mistralai/Mistral-7B-Instruct-v0.1
+2024-02-12 22:34:23,296 - INFO - __main__ - Loaded LoRA Model
+2024-02-12 22:34:23,861 - INFO - __main__ - Instantiated Trainer
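The replaced log tracked repeated fine-tuning runs across tiiuae/falcon-7b-instruct, mistralai/Mistral-7B-Instruct-v0.1, and meta-llama/Llama-2-7b-chat-hf; the commit resets it for a fresh Mistral run. The line format matches a stock logging.basicConfig setup; a sketch of a configuration that would produce it (assumed, since the training script itself is not part of this commit):

```python
# Sketch: logging setup reproducing the qual_clasification.log format.
import logging

logging.basicConfig(
    filename="qual_clasification.log",
    level=logging.INFO,
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
)
logger = logging.getLogger(__name__)  # "__main__" when run as a script
logger.info("Loaded Model ID: mistralai/Mistral-7B-Instruct-v0.1")
```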
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -27,15 +27,16 @@
       "special": true
     }
   },
+  "additional_special_tokens": [],
   "bos_token": "<s>",
-  "chat_template": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 and system_message != false %}{% set content = '<<SYS>>\\n' + system_message + '\\n<</SYS>>\\n\\n' + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' %}{{ bos_token + '[INST] ' + content.strip() + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ ' ' + content.strip() + ' ' + eos_token }}{% endif %}{% endfor %}",
+  "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token + ' ' }}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}",
   "clean_up_tokenization_spaces": false,
   "eos_token": "</s>",
-  "legacy": false,
+  "legacy": true,
   "model_max_length": 1000000000000000019884624838656,
   "pad_token": "</s>",
-  "padding_side": "right",
   "sp_model_kwargs": {},
+  "spaces_between_special_tokens": false,
   "tokenizer_class": "LlamaTokenizer",
   "unk_token": "<unk>",
   "use_default_system_prompt": false
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3852bb9dfcaa97d2473f8c2afa047168178622102826f212bc6722e2edad01b2
+oid sha256:d7456c20f600582e4d15f4903fccc90fd16d43848ae4f5a63e62e4ba5a994945
 size 4728
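training_args.bin is the pickled TrainingArguments that Trainer writes alongside its checkpoints; the new hash with an unchanged 4,728-byte size indicates the arguments were re-serialized with at least one changed value. A sketch of inspecting it (the file is a pickle, so only load it from a trusted source):

```python
# Sketch: inspect the serialized TrainingArguments.
import torch

args = torch.load("training_args.bin", weights_only=False)  # trusted file only
print(args.learning_rate, args.per_device_train_batch_size, args.max_steps)
```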