Commit 28971c0

pack padded sequence and reverse
1 parent f73552c commit 28971c0

File tree

2 files changed: +9 -16 lines

config.py

+4 -4

@@ -11,10 +11,10 @@ def __init__(self):
         self.mode = "train"  # TODO: Why do we want this in config?

         # Device params
-        self.use_cuda = torch.cuda.is_available()
-        self.device = ('cuda' if self.use_cuda else 'cpu')
-        # self.device = 'cpu'
-        # self.use_cuda = False
+        # self.use_cuda = torch.cuda.is_available()
+        # self.device = ('cuda' if self.use_cuda else 'cpu')
+        self.device = 'cpu'
+        self.use_cuda = False

         # Global dimension params
         self.embedding_dim = 100
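
For reference, the lines being swapped here implement the usual PyTorch device-selection idiom. A minimal standalone sketch of that idiom (the Config class and use_gpu flag below are illustrative, not taken from this repo):

import torch

class Config:
    def __init__(self, use_gpu=True):
        # Use CUDA only when it is both requested and actually available;
        # use_gpu=False forces CPU, which is what this commit hard-codes.
        self.use_cuda = use_gpu and torch.cuda.is_available()
        self.device = 'cuda' if self.use_cuda else 'cpu'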

networks.py

+5 -12

@@ -57,14 +57,14 @@ def forward(self, inputs, mask):

         # Convert the numbers into embeddings
         inputs = self.embeddings(inputs.to('cpu'))
-        packed = inputs
+        # packed = inputs

         # Get the sorted version of inputs as required for pack_padded_sequence
-        # inputs_sorted = torch.index_select(inputs, 0, lens_argsort)
+        inputs_sorted = torch.index_select(inputs, 0, lens_argsort)

-        # packed = pack_padded_sequence(inputs_sorted, lens, batch_first=True)
+        packed = pack_padded_sequence(inputs_sorted, lens_sorted, batch_first=True)
         output, self.hidden = self.encoder(packed, self.hidden)
-        # output, _ = pad_packed_sequence(output, batch_first=True)
+        output, _ = pad_packed_sequence(output, batch_first=True)

         # Restore batch elements to original order
         # output = torch.index_select(output, 0, lens_argsort_argsort)
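
The lines enabled above follow the standard pack/unpack recipe for running an RNN over a padded batch: sort by length, pack, encode, unpack, then undo the sort. A minimal self-contained sketch of that recipe, assuming a toy GRU encoder (sizes and data are illustrative; only the variable names mirror the diff):

import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence

batch_size, max_len, emb_dim, hidden_size = 3, 5, 4, 6
inputs = torch.randn(batch_size, max_len, emb_dim)   # already-padded embeddings
lens = torch.tensor([5, 2, 4])                        # true length of each sequence

# Sort by length (descending) and remember how to undo the sort.
lens_sorted, lens_argsort = lens.sort(descending=True)
lens_argsort_argsort = lens_argsort.argsort()

inputs_sorted = torch.index_select(inputs, 0, lens_argsort)
packed = pack_padded_sequence(inputs_sorted, lens_sorted, batch_first=True)

encoder = nn.GRU(emb_dim, hidden_size, batch_first=True)
output, hidden = encoder(packed)                      # the RNN skips padded steps
output, _ = pad_packed_sequence(output, batch_first=True)

# Restore batch elements to their original order.
output = torch.index_select(output, 0, lens_argsort_argsort)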
@@ -119,8 +119,7 @@ def forward(self, inputs, mask):
         output = torch.index_select(output, 0, lens_argsort_argsort.to('cpu'))

         # Make output contiguous for speed of future operations
-        # TODO: Try without and time to see if this actually speeds up
-        output = output.contiguous()
+        # output = output.contiguous()

         output = self.dropout(output)
         return output
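
On the .contiguous() call that the removed TODO questioned: its main effect is to copy the tensor into contiguous memory, which is required by operations such as .view() on a transposed tensor and can speed up later elementwise work. A quick illustration:

import torch

x = torch.randn(2, 3, 4).transpose(1, 2)   # transpose returns a non-contiguous view
print(x.is_contiguous())                    # False
# x.view(2, -1) would raise a RuntimeError here; copy into contiguous memory first
y = x.contiguous().view(2, -1)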
@@ -161,8 +160,6 @@ def __init__(self, device,
                            num_layers=num_layers,
                            bidirectional=bidirectional)

-        # self.hidden = self.initHidden()  # for GRU
-
     def forward(self, U, d_mask, target_span):

         batch_indices = torch.arange(self.batch_size, out=torch.LongTensor(self.batch_size))
@@ -226,10 +223,6 @@ def forward(self, U, d_mask, target_span):
         loss = cumulative_loss / self.max_dec_steps
         return loss, s_i, e_i

-    # def initHidden(self):
-    #     return torch.zeros(self.num_directions * self.num_layers, self.batch_size, self.hidden_size)
-
-
 class CoattentionNetwork(nn.Module):
     def __init__(self, device,
                  hidden_size,