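# Configuration for a Lao ('lo') -> Vietnamese ('vi') Transformer translation
# run. Comments are descriptive annotations; exact semantics depend on the
# training code that consumes this file.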
experiment_name: 'runs/transformer_huge'

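# Data and tokenization settings. '{0}' in tokenizer_file is presumably
# filled in with a language code (e.g. tokenizer_lo.json / tokenizer_vi.json).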
dataset:
  src_lang: 'lo'
  src_tokenizer: 'BPE'
  src_max_seq_len: 400
  tgt_lang: 'vi'
  tgt_tokenizer: 'WordLevel'
  tgt_max_seq_len: 350
  train_dataset: 'train_clean.dat'
  validate_dataset: 'dev_clean.dat'
  tokenizer_file: 'tokenizer_{0}.json'
  bleu_dataset: 'test2023'

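# Model architecture. Note the asymmetric depth: 8 encoder layers vs.
# 4 decoder layers.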
model:
  d_model: 512
  num_heads: 8
  d_ff: 2048
  dropout_p: 0.15
  num_encoder_layers: 8
  num_decoder_layers: 4
  model_folder: 'weights'
  model_basename: 'transformer_huge'
  preload: '_61' # suffix of the checkpoint to resume from (presumably '<model_basename>_61')

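# Optimization settings. patience and warm_up_steps presumably control early
# stopping and learning-rate warm-up, respectively.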
train:
  lr: 0.001 # 1e-3
  batch_size: 32
  num_epochs: 61
  label_smoothing: 0.1
  on_colab: false # are you training on Colab?
  patience: 1
  warm_up_steps: 200