ArthurZ (HF staff) committed
Commit 00d1629
1 parent: 59fc265

Update config.json


This should fix the issue with AutoModelForSeq2SeqLM: the Auto classes dispatch on the `model_type` field of config.json, and the key registered for NLLB-MoE in Transformers is "nllb-moe" (with a hyphen), so the underscored "nllb_moe" could not be resolved.
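For reference, a minimal sketch of how the Auto API resolves the checkpoint once `model_type` matches the registered key. The repository id below is an assumption, since this page does not name it:

from transformers import AutoConfig, AutoModelForSeq2SeqLM

repo_id = "facebook/nllb-moe-54b"  # assumption: the repository this commit belongs to

# AutoConfig reads config.json and dispatches on its "model_type" field.
# "nllb-moe" is the key registered for NllbMoeConfig, so the old
# underscored value "nllb_moe" failed to map to a config class.
config = AutoConfig.from_pretrained(repo_id)
print(type(config).__name__)  # NllbMoeConfig

# With the config resolving correctly, the Auto class can pick the
# matching seq2seq model class (NllbMoeForConditionalGeneration).
model = AutoModelForSeq2SeqLM.from_pretrained(repo_id)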

Files changed (1):
  1. config.json (+1, -1)
config.json CHANGED
@@ -27,7 +27,7 @@
   "is_encoder_decoder": true,
   "max_length": 200,
   "max_position_embeddings": 1024,
-  "model_type": "nllb_moe",
+  "model_type": "nllb-moe",
   "moe_eval_capacity_token_fraction": 1.0,
   "moe_token_dropout": 0.2,
   "normalize_router_prob_before_dropping": false,