wissamantoun committed on
Commit 6e4c9a8
Parent(s): 96c231b
Update README.md

README.md CHANGED
```diff
@@ -14,6 +14,8 @@ widget:
 
 # Arabic GPT2
 
+<img src="https://raw.githubusercontent.com/aub-mind/arabert/master/AraGPT2.png" width="100" align="left"/>
+
 You can find more information in our paper [AraGPT2](https://arxiv.org/abs/2012.15520)
 
 The code in this repository was used to train all GPT2 variants. The code support training and fine-tuning GPT2 on GPUs and TPUs via the TPUEstimator API.
```
```diff
@@ -39,7 +41,7 @@ from arabert.aragpt2.grover.modeling_gpt2 import GPT2LMHeadModel
 
 from arabert.preprocess import ArabertPreprocessor
 
-MODEL_NAME='aragpt2-large'
+MODEL_NAME='aubmindlab/aragpt2-large'
 arabert_prep = ArabertPreprocessor(model_name=MODEL_NAME)
 
 text=""
```
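For reference, the snippet this hunk edits is the model card's Python usage example. A minimal end-to-end sketch built around the corrected model identifier might look as follows; the `preprocess` call and the `transformers` pipeline wiring are assumptions inferred from the imports shown in the hunk, not part of the commit:

```python
# Sketch of the usage example this hunk touches. The tokenizer and
# pipeline wiring are assumptions based on the hunk's imports.
from transformers import GPT2TokenizerFast, pipeline
from arabert.preprocess import ArabertPreprocessor
from arabert.aragpt2.grover.modeling_gpt2 import GPT2LMHeadModel

MODEL_NAME = 'aubmindlab/aragpt2-large'  # full Hub id, as the commit fixes it
arabert_prep = ArabertPreprocessor(model_name=MODEL_NAME)

text = ""  # fill in an Arabic prompt; the README leaves this empty
text_clean = arabert_prep.preprocess(text)  # AraBERT-style text normalization

model = GPT2LMHeadModel.from_pretrained(MODEL_NAME)
tokenizer = GPT2TokenizerFast.from_pretrained(MODEL_NAME)
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
print(generator(text_clean, max_length=64)[0]["generated_text"])
```

With the old value `'aragpt2-large'`, `from_pretrained` would look for a local directory of that name; adding the `aubmindlab/` prefix makes it resolve on the Hugging Face Hub, which is what this hunk fixes.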
````diff
@@ -75,25 +77,7 @@ python create_pretraining_data.py
 
 Finetuning:
 ```bash
-python3 run_pretraining.py
- --input_file="gs://<GS_BUCKET>/pretraining_data/*" \\
- --output_dir="gs://<GS_BUCKET>/pretraining_model/" \\
- --config_file="config/small_hparams.json" \\
- --batch_size=128 \\
- --eval_batch_size=8 \\
- --num_train_steps= \\
- --num_warmup_steps= \\
- --learning_rate= \\
- --save_checkpoints_steps= \\
- --max_seq_length=1024 \\
- --max_eval_steps= \\
- --optimizer="lamb" \\
- --iterations_per_loop=5000 \\
- --keep_checkpoint_max=10 \\
- --use_tpu=True \\
- --tpu_name=<TPU NAME> \\
- --do_train=True \\
- --do_eval=False
+python3 run_pretraining.py \\\r\n --input_file="gs://<GS_BUCKET>/pretraining_data/*" \\\r\n --output_dir="gs://<GS_BUCKET>/pretraining_model/" \\\r\n --config_file="config/small_hparams.json" \\\r\n --batch_size=128 \\\r\n --eval_batch_size=8 \\\r\n --num_train_steps= \\\r\n --num_warmup_steps= \\\r\n --learning_rate= \\\r\n --save_checkpoints_steps= \\\r\n --max_seq_length=1024 \\\r\n --max_eval_steps= \\\r\n --optimizer="lamb" \\\r\n --iterations_per_loop=5000 \\\r\n --keep_checkpoint_max=10 \\\r\n --use_tpu=True \\\r\n --tpu_name=<TPU NAME> \\\r\n --do_train=True \\\r\n --do_eval=False
 ```
 # Model Sizes
 
````
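This hunk collapses the README's multi-line finetuning command into a single line; note that several flags (`--num_train_steps`, `--learning_rate`, `--save_checkpoints_steps`, `--max_eval_steps`) are deliberately left blank for the user to fill in. Purely as an illustrative sketch, one way to assemble and launch the same command from Python; every value marked hypothetical is a placeholder, not a setting from the commit or the paper:

```python
# Illustrative launcher for the finetuning command above. Values marked
# "hypothetical" are placeholders chosen for the example only.
import subprocess

flags = {
    "input_file": "gs://<GS_BUCKET>/pretraining_data/*",  # placeholder bucket, as in the hunk
    "output_dir": "gs://<GS_BUCKET>/pretraining_model/",
    "config_file": "config/small_hparams.json",
    "batch_size": 128,
    "eval_batch_size": 8,
    "num_train_steps": 100000,       # hypothetical
    "num_warmup_steps": 10000,       # hypothetical
    "learning_rate": 1e-4,           # hypothetical
    "save_checkpoints_steps": 5000,  # hypothetical
    "max_seq_length": 1024,
    "max_eval_steps": 100,           # hypothetical
    "optimizer": "lamb",
    "iterations_per_loop": 5000,
    "keep_checkpoint_max": 10,
    "use_tpu": True,
    "tpu_name": "<TPU NAME>",        # placeholder, as in the hunk
    "do_train": True,
    "do_eval": False,
}
cmd = ["python3", "run_pretraining.py"] + [f"--{k}={v}" for k, v in flags.items()]
subprocess.run(cmd, check=True)  # assumes it is run where run_pretraining.py lives
```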