Change shape to hardcoded values:

tensor_one.view(5, 1)

Only set the length for some of the dimensions and let PyTorch infer the rest by passing -1:

tensor_one.view(5, -1)
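
PyTorch infers the -1 dimension from the total element count; a quick sketch (hypothetical tensor x with 15 elements, torch assumed imported):

x = torch.arange(15)
print(x.view(5, -1).shape)  # torch.Size([5, 3]); the -1 is inferred as 15 // 5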

Match the shape of another tensor:

tensor_one.view(*tensor_two.shape)

# Alternative: view_as takes the target tensor itself, not its shape
tensor_one.view_as(tensor_two)

# Cast the tensor to a different type
tensor.type(torch.FloatTensor)
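
The shorter dtype helpers do the same cast; a minimal sketch (tensor name is a placeholder):

x = torch.ones(3, dtype=torch.int64)
x_float = x.float()            # same result as x.type(torch.FloatTensor)
x_float = x.to(torch.float32)  # .to() also handles device moves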

# topk(1) returns the largest probability and its class index for each example
top_p, top_class = ps.topk(1, dim=1)

# Look at the most likely classes for the first 10 examples
print(top_class[:10,:])
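
To turn these predictions into an accuracy, compare them with the labels; a short sketch, assuming labels is a tensor of class indices with one entry per example:

equals = top_class == labels.view(*top_class.shape)
accuracy = torch.mean(equals.type(torch.FloatTensor))
print(f'Accuracy: {accuracy.item() * 100}%')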
# Example of using Sequential
model = nn.Sequential(
          nn.Conv2d(1,20,5),
          nn.ReLU(),
          nn.Conv2d(20,64,5),
          nn.ReLU()
        )

# Example of using Sequential with OrderedDict
from collections import OrderedDict

model = nn.Sequential(OrderedDict([
          ('conv1', nn.Conv2d(1,20,5)),
          ('relu1', nn.ReLU()),
          ('conv2', nn.Conv2d(20,64,5)),
          ('relu2', nn.ReLU())
        ]))
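
With the OrderedDict version the named layers become attributes of the model; a usage sketch with a hypothetical input batch:

x = torch.randn(16, 1, 28, 28)  # hypothetical batch of 16 single-channel 28x28 images
out = model(x)                  # runs every layer in order; shape (16, 64, 20, 20) here
print(model.conv1)              # named access; the plain version uses indexing, e.g. model[0]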
# List version
class MLP(nn.Module):
    def __init__(self, h_sizes, out_size):
        ...
        self.hidden = nn.ModuleList()
        for k in range(len(h_sizes)-1):
            self.hidden.append(nn.Linear(h_sizes[k], h_sizes[k+1]))
        ...

# Dictionary version
class MLP(nn.Module):
    def __init__(self, h_sizes, out_size):
        ...
        self.hidden = nn.ModuleDict()
        for k in range(len(h_sizes)-1):
            self.hidden[f'linear_{k}'] = nn.Linear(h_sizes[k], h_sizes[k+1])
        ...
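
Either container is then iterated in forward; a sketch for the list version (self.out is an assumed nn.Linear(h_sizes[-1], out_size) defined in the elided part; the dict version would loop over self.hidden.values()):

    def forward(self, x):
        for layer in self.hidden:
            x = torch.relu(layer(x))
        return self.out(x)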
# stack up LSTM outputs
out = out.contiguous().view(-1, self.n_hidden)
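
For context, a hedged sketch of where this reshape typically sits in a forward pass (self.lstm, self.fc and self.n_hidden are assumed attributes, with batch_first=True):

        out, hidden = self.lstm(x, hidden)              # (batch, seq_len, n_hidden)
        out = out.contiguous().view(-1, self.n_hidden)  # (batch * seq_len, n_hidden)
        out = self.fc(out)                              # one prediction per time step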
# use clip_grad_norm_ to help prevent exploding gradients
nn.utils.clip_grad_norm_(net.parameters(), clip)
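
Clipping must happen after backward() and before the optimizer step; a sketch of that slice of a training loop (net, criterion, optimizer, inputs, targets and clip are assumed to exist):

optimizer.zero_grad()
loss = criterion(net(inputs), targets)
loss.backward()
# rescale gradients in place so their total norm is at most clip
nn.utils.clip_grad_norm_(net.parameters(), clip)
optimizer.step()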