
Commit 36b5763

Update seq2seq_translation_tutorial.py
1 parent 48d2ae7 commit 36b5763

File tree

1 file changed: +1 -2 lines changed

intermediate_source/seq2seq_translation_tutorial.py

Lines changed: 1 addition & 2 deletions
@@ -369,7 +369,6 @@ def forward(self, input):
 #
 
 class DecoderRNN(nn.Module):
-    # Standard non-attentional decoder
     def __init__(self, hidden_size, output_size):
         super(DecoderRNN, self).__init__()
         self.embedding = nn.Embedding(output_size, hidden_size)
@@ -444,7 +443,7 @@ def forward_step(self, input, hidden):
 #
 # Bahdanau attention, also known as additive attention, is a commonly used
 # attention mechanism in sequence-to-sequence models, particularly in neural
-# machine translation tasks. It was introduced by Dzmitry Bahdanau et al. in their
+# machine translation tasks. It was introduced by Bahdanau et al. in their
 # paper titled `Neural Machine Translation by Jointly Learning to Align and Translate <https://arxiv.org/pdf/1409.0473.pdf>`__.
 # This attention mechanism employs a learned alignment model to compute attention
 # scores between the encoder and decoder hidden states. It utilizes a feed-forward
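The comment block touched by the second hunk describes how Bahdanau (additive) attention computes alignment scores with a small feed-forward network. As context for the change, here is a minimal sketch of that scoring idea in PyTorch; it assumes a decoder query of shape (batch, 1, hidden) and encoder outputs of shape (batch, seq_len, hidden), and the class and layer names (BahdanauAttention, Wa, Ua, Va) are illustrative rather than quoted verbatim from the tutorial file.

import torch
import torch.nn as nn
import torch.nn.functional as F

class BahdanauAttention(nn.Module):
    """Additive attention: score(q, k) = Va^T tanh(Wa q + Ua k)."""
    def __init__(self, hidden_size):
        super().__init__()
        self.Wa = nn.Linear(hidden_size, hidden_size)  # projects the decoder (query) state
        self.Ua = nn.Linear(hidden_size, hidden_size)  # projects the encoder (key) states
        self.Va = nn.Linear(hidden_size, 1)            # feed-forward layer that emits a scalar score

    def forward(self, query, keys):
        # query: (batch, 1, hidden) current decoder hidden state
        # keys:  (batch, seq_len, hidden) encoder outputs
        scores = self.Va(torch.tanh(self.Wa(query) + self.Ua(keys)))  # (batch, seq_len, 1)
        scores = scores.squeeze(2).unsqueeze(1)                       # (batch, 1, seq_len)
        weights = F.softmax(scores, dim=-1)                           # attention distribution over source positions
        context = torch.bmm(weights, keys)                            # (batch, 1, hidden) weighted sum of encoder states
        return context, weights

The learned alignment model here is the Wa/Ua/Va feed-forward stack: it compares the current decoder state against every encoder state, and the softmax turns those scores into the weights used to build the context vector fed to the attentional decoder.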
