---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-15-e8f9c0429e47> in <module>()
1 gRNN(Variable(torch.FloatTensor(10000, 101)),
----> 2 Variable(torch.FloatTensor(1, 10, 101)))
/home/ge/anaconda3/envs/deep-learning/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
200
201 def __call__(self, *input, **kwargs):
--> 202 result = self.forward(*input, **kwargs)
203 for hook in self._forward_hooks.values():
204 hook_result = hook(self, input, result)
<ipython-input-6-b3b5f9382efb> in forward(self, x, hidden)
12 def forward(self, x, hidden):
13 embeded = self.encoder(x)
---> 14 gru_output, hidden = self.gru(embeded.view(embeded.size()[0], 1, -1), hidden)
15 output = self.decoder(gru_output)
16 return output, hidden
/home/ge/anaconda3/envs/deep-learning/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
200
201 def __call__(self, *input, **kwargs):
--> 202 result = self.forward(*input, **kwargs)
203 for hook in self._forward_hooks.values():
204 hook_result = hook(self, input, result)
/home/ge/anaconda3/envs/deep-learning/lib/python3.6/site-packages/torch/nn/modules/rnn.py in forward(self, input, hx)
89 dropout_state=self.dropout_state
90 )
---> 91 output, hidden = func(input, self.all_weights, hx)
92 if is_packed:
93 output = PackedSequence(output, batch_sizes)
/home/ge/anaconda3/envs/deep-learning/lib/python3.6/site-packages/torch/nn/_functions/rnn.py in forward(input, *fargs, **fkwargs)
325 else:
326 func = AutogradRNN(*args, **kwargs)
--> 327 return func(input, *fargs, **fkwargs)
328
329 return forward
/home/ge/anaconda3/envs/deep-learning/lib/python3.6/site-packages/torch/nn/_functions/rnn.py in forward(input, weight, hidden)
225 input = input.transpose(0, 1)
226
--> 227 nexth, output = func(input, hidden, weight)
228
229 if batch_first and batch_sizes is None:
/home/ge/anaconda3/envs/deep-learning/lib/python3.6/site-packages/torch/nn/_functions/rnn.py in forward(input, hidden, weight)
65 l = i * num_directions + j
66
---> 67 hy, output = inner(input, hidden[l], weight[l])
68 next_hidden.append(hy)
69 all_output.append(output)
/home/ge/anaconda3/envs/deep-learning/lib/python3.6/site-packages/torch/nn/_functions/rnn.py in forward(input, hidden, weight)
94 steps = range(input.size(0) - 1, -1, -1) if reverse else range(input.size(0))
95 for i in steps:
---> 96 hidden = inner(input[i], hidden, *weight)
97 # hack to handle LSTM
98 output.append(isinstance(hidden, tuple) and hidden[0] or hidden)
/home/ge/anaconda3/envs/deep-learning/lib/python3.6/site-packages/torch/nn/_functions/rnn.py in GRUCell(input, hidden, w_ih, w_hh, b_ih, b_hh)
40 h_r, h_i, h_n = gh.chunk(3, 1)
41
---> 42 resetgate = F.sigmoid(i_r + h_r)
43 inputgate = F.sigmoid(i_i + h_i)
44 newgate = F.tanh(i_n + resetgate * h_n)
/home/ge/anaconda3/envs/deep-learning/lib/python3.6/site-packages/torch/autograd/variable.py in __add__(self, other)
742
743 def __add__(self, other):
--> 744 return self.add(other)
745 __radd__ = __add__
746
/home/ge/anaconda3/envs/deep-learning/lib/python3.6/site-packages/torch/autograd/variable.py in add(self, other)
291
292 def add(self, other):
--> 293 return self._add(other, False)
294
295 def add_(self, other):
/home/ge/anaconda3/envs/deep-learning/lib/python3.6/site-packages/torch/autograd/variable.py in _add(self, other, inplace)
285 def _add(self, other, inplace):
286 if isinstance(other, Variable):
--> 287 return Add(inplace)(self, other)
288 else:
289 assert not torch.is_tensor(other)
/home/ge/anaconda3/envs/deep-learning/lib/python3.6/site-packages/torch/autograd/_functions/basic_ops.py in forward(self, a, b)
18 return a.add_(b)
19 else:
---> 20 return a.add(b)
21
22 def backward(self, grad_output):
RuntimeError: inconsistent tensor size at /data/users/soumith/miniconda2/conda-bld/pytorch-cuda80-0.1.10_1488758793045/work/torch/lib/TH/generic/THTensorMath.c:827
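The stack bottoms out in GRUCell at `resetgate = F.sigmoid(i_r + h_r)`, so the "inconsistent tensor size" means the input-to-hidden and hidden-to-hidden terms have different batch sizes. After `embeded.view(embeded.size()[0], 1, -1)` the sequence fed to the GRU has batch size 1, while the hidden state passed in, Variable(torch.FloatTensor(1, 10, 101)), has batch size 10; PyTorch 0.1.10 does not broadcast this addition, so the GRU cell raises. Below is a minimal sketch of one fix, assuming hidden_size=101 and num_layers=1 (inferred only from the shapes in the failing call) and keeping the batch size of 1 that forward() hard-codes in the view:

import torch
from torch.autograd import Variable

# gRNN is the module instance defined earlier in the notebook.
seq = Variable(torch.FloatTensor(10000, 101))   # (seq_len, features), same shape as the failing call

# The hidden state must be (num_layers, batch, hidden_size), and batch must match
# the GRU input's batch dimension, which forward() fixes to 1 via .view(..., 1, -1).
hidden = Variable(torch.zeros(1, 1, 101))

output, hidden = gRNN(seq, hidden)

If a batch of 10 sequences was actually intended, the reshape inside forward() would instead need to put 10 in the batch dimension (dim 1 of the GRU input) rather than hard-coding 1, so that the input agrees with the (1, 10, 101) hidden state.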