---
license: apache-2.0
language:
- en
tags:
- sft
pipeline_tag: text-generation
widget:
- text: <|prompter|>What is a meme, and what's the history behind this word?<|endoftext|><|assistant|>
- text: <|prompter|>What's the Earth total population<|endoftext|><|assistant|>
- text: <|prompter|>Write a story about future of AI development<|endoftext|><|assistant|>
---

# Open-Assistant StableLM-7B SFT-7 Model

This is the 7th iteration English supervised-fine-tuning (SFT) model of
the [Open-Assistant](https://github.com/LAION-AI/Open-Assistant) project.
It is based on a StableLM 7B model that was fine-tuned on human demonstrations
of assistant conversations collected through the
[https://open-assistant.io/](https://open-assistant.io/) human feedback web
app before April 12, 2023.

## Model Details

- **Developed by:** [Open-Assistant Contributors](https://open-assistant.io/)
- **Model type:** Transformer-based Language Model
- **Language:** English
- **Finetuned from:** [stabilityai/stablelm-base-alpha-7b](https://huggingface.co/stabilityai/stablelm-base-alpha-7b)
- **Code:** [Open-Assistant/model/model_training](https://github.com/LAION-AI/Open-Assistant/tree/main/model/model_training)
- **Demo:** TODO
- **License:** Creative Commons license ([CC BY-SA-4.0](https://creativecommons.org/licenses/by-sa/4.0/))
- **Contact:** [Open-Assistant Discord](https://ykilcher.com/open-assistant-discord)

## Prompting

Two special tokens are used to mark the beginning of user and assistant turns:
`<|prompter|>` and `<|assistant|>`. Each turn ends with an `<|endoftext|>` token.

Input prompt example:
```
<|prompter|>What is a meme, and what's the history behind this word?<|endoftext|><|assistant|>
```
The input ends with the `<|assistant|>` token to signal that the model should
start generating the assistant reply.
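
For convenience, a minimal generation sketch with the `transformers` library is shown below. The repository id `OpenAssistant/stablelm-7b-sft-v7-epoch-3` and the sampling parameters are assumptions for illustration, not confirmed by this card; substitute the actual id of this model.

```
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumed repo id; replace with the actual id of this model.
model_id = "OpenAssistant/stablelm-7b-sft-v7-epoch-3"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

# Wrap the user message in the special tokens described above and end the
# prompt with <|assistant|> so the model starts generating the reply.
prompt = "<|prompter|>What is a meme, and what's the history behind this word?<|endoftext|><|assistant|>"
inputs = tokenizer(prompt, return_tensors="pt")
output = model.generate(**inputs, max_new_tokens=256, do_sample=True, top_p=0.9, temperature=0.8)
print(tokenizer.decode(output[0]))
```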

## Dev Details

- wandb: https://wandb.ai/open-assistant/supervised-finetuning/runs/08dfhyuc
- base model: [stabilityai/stablelm-base-alpha-7b](https://huggingface.co/stabilityai/stablelm-base-alpha-7b)
- checkpoint: 3 epochs (12000 steps)

command: `deepspeed trainer_sft.py --configs defaults reference-data reference-pythia-12b --cache_dir /home/ubuntu/data_cache --output_dir .saved/oasst-sft-3-pythia-12b-reference_2kpre --num_train_epochs 8 --residual_dropout 0.2 --deepspeed --use_flash_attention true --model_name andreaskoepf/pythia-12b-pre-2000`

data:
```
oasst-mix:
  save_strategy: epoch
  sort_by_length: false
  use_custom_sampler: false
  datasets:
    - oasst_export:
        lang: "bg,ca,cs,da,de,en,es,fr,hr,hu,it,nl,pl,pt,ro,ru,sl,sr,sv,uk"
        input_file_path: 2023-04-12_oasst_release_ready_synth.jsonl.gz
    - vicuna:
        val_split: 0.05
        max_val_set: 800
        fraction: 1.0
    - dolly15k:
        val_split: 0.05
        max_val_set: 300
    - grade_school_math_instructions:
        val_split: 0.05
    - code_alpaca:
        val_split: 0.05
        max_val_set: 250
```

stablelm:
```
stablelm-7b:
  dtype: fp16
  log_dir: stablelm_log_7b
  model_name: stabilityai/stablelm-base-alpha-7b
  output_dir: stablelm_7b
  max_length: 4096
  warmup_steps: 100
  gradient_checkpointing: true
  gradient_accumulation_steps: 2
  per_device_train_batch_size: 4
  per_device_eval_batch_size: 4
  eval_steps: 100
  save_steps: 500
  num_train_epochs: 4
  save_total_limit: 4
  use_flash_attention: true
```
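
For reference, with these settings each device processes per_device_train_batch_size × gradient_accumulation_steps = 4 × 2 = 8 sequences per optimizer step, so the global batch size is 8 × the number of GPUs (the GPU count is not stated here).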

zero config:
```
{
  "fp16": {
    "enabled": "auto",
    "loss_scale": 0,
    "loss_scale_window": 1000,
    "initial_scale_power": 16,
    "hysteresis": 2,
    "min_loss_scale": 1
  },
  "bf16": {
    "enabled": "auto"
  },
  "optimizer": {
    "type": "AdamW",
    "params": {
      "lr": "auto",
      "betas": "auto",
      "eps": "auto",
      "weight_decay": "auto"
    }
  },
  "scheduler": {
    "type": "WarmupDecayLR",
    "params": {
      "warmup_min_lr": "auto",
      "warmup_max_lr": "auto",
      "warmup_num_steps": "auto",
      "total_num_steps": "auto"
    }
  },
  "zero_optimization": {
    "stage": 2,
    "allgather_partitions": true,
    "allgather_bucket_size": 1e9,
    "overlap_comm": false,
    "reduce_scatter": true,
    "reduce_bucket_size": 1e9,
    "contiguous_gradients": true
  },
  "gradient_accumulation_steps": "auto",
  "gradient_clipping": "auto",
  "steps_per_print": 2000,
  "train_batch_size": "auto",
  "train_micro_batch_size_per_gpu": "auto",
  "wall_clock_breakdown": false
}
```
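
The `"auto"` values in this config are placeholders that get filled in at launch time. As a rough sketch of how such a file is typically consumed through the Hugging Face `Trainer` integration (illustrative only; it may not match exactly what `trainer_sft.py` does, and the learning rate shown is an assumption):

```
from transformers import TrainingArguments

# "auto" entries in the DeepSpeed config are resolved from these arguments,
# e.g. optimizer "lr" from learning_rate and "warmup_num_steps" from warmup_steps.
args = TrainingArguments(
    output_dir="stablelm_7b",
    per_device_train_batch_size=4,   # fills "train_micro_batch_size_per_gpu"
    gradient_accumulation_steps=2,   # fills "gradient_accumulation_steps"
    fp16=True,                       # fills "fp16.enabled"
    warmup_steps=100,                # fills "warmup_num_steps"
    learning_rate=2e-5,              # assumed value, not stated in this card
    deepspeed="zero_config.json",    # the ZeRO stage-2 config above, saved to disk
)
```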