{ "architectures": [ "CustomSeq2SeqModel" ], "bos_token_id": 0, "dec_emb_dim": 256, "decoder_start_token_id": 1, "dropout": 0.1, "enc_emb_dim": 256, "eos_token_id": 2, "hidden_dim": 256, "max_position_embeddings": 256, "model_type": "custom_seq2seq", "n_layers": 3, "num_attention_heads": 8, "pad_token_id": 1, "share_embeddings": false, "torch_dtype": "float32", "transformers_version": "4.44.2", "vocab_size": 28297 }