{
  "attention": "bahdanau",
  "attention_architecture": "standard",
  "batch_size": 128,
  "bpe_delimiter": null,
  "colocate_gradients_with_ops": true,
  "decay_factor": 1.0,
  "decay_steps": 1000,
  "dropout": 0.0,
  "encoder_type": "uni",
  "eos": "",
  "forget_bias": 1.0,
  "infer_batch_size": 16,
  "init_weight": 0.1,
  "learning_rate": 3e-4,
  "max_gradient_norm": 1.0,
  "metrics": ["bleu"],
  "num_buckets": 10,
  "num_layers": 2,
  "num_train_steps": 25000,
  "num_units": 512,
  "optimizer": "adam",
  "residual": false,
  "share_vocab": false,
  "sos": "",
  "source_reverse": true,
  "src_max_len": 100,
  "src_max_len_infer": null,
  "start_decay_step": 17000,
  "steps_per_external_eval": 100,
  "steps_per_stats": 100,
  "tgt_max_len": 100,
  "tgt_max_len_infer": null,
  "time_major": true,
  "unit_type": "lstm",
  "beam_width": 10
}