{
  "architectures": [
    "Seq2SeqTransformer"
  ],
  "dim_feedforward": 512,
  "dropout": 0.1,
  "emb_size": 512,
  "nhead": 8,
  "num_decoder_layers": 3,
  "num_encoder_layers": 3,
  "src_vocab_size": 19214,
  "tgt_vocab_size": 10837,
  "torch_dtype": "float32",
  "transformers_version": "4.38.2"
}