Issue
I am building an RNN that produces a multi-class classification output over 11 classes. The inputs are word embeddings taken from a pretrained GloVe model.
The error I get is (full traceback at the end of the question):
ValueError: Expected input batch_size (1) to match target batch_size (11).
Note that here I use batch_size=1, and the error says it expected batch size 1 to match target batch size 11. However, if I change the batch size to 11, the error changes to:
ValueError: Expected input batch_size (11) to match target batch_size (121).
I think the error is coming from the shape of text, which is torch.Size([11, 300]) and therefore lacks a sequence-length dimension. I thought that if I do not assign a sequence length it defaults to 1, but I do not know how to add this dimension in.
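For reference, this is roughly how a plain nn.RNN handles these shapes (a minimal sketch using the hyperparameter sizes below; this is a stand-in nn.RNN, not my actual VanillaRNN class):
import torch
import torch.nn as nn

# Minimal sketch with assumed sizes (input_size=300, hidden_dim=100, n_layers=2);
# a stand-in nn.RNN, not the VanillaRNN class used in the training code.
rnn = nn.RNN(input_size=300, hidden_size=100, num_layers=2)  # default batch_first=False

text = torch.randn(11, 300)        # 11 tokens x 300-dim GloVe embeddings, no batch dimension
inp = text.unsqueeze(1)            # -> (11, 1, 300): seq_len=11, batch=1, input_size=300
output, hidden = rnn(inp)

print(output.shape)                # torch.Size([11, 1, 100])
print(hidden.shape)                # torch.Size([2, 1, 100])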
Training loop:
def train(model, device, train_loader, valid_loader, epochs, learning_rate):
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    train_loss, validation_loss = [], []
    train_acc, validation_acc = [], []

    for epoch in range(epochs):
        # train
        model.train()
        running_loss = 0.
        correct, total = 0, 0
        steps = 0
        for idx, batch in enumerate(train_loader):
            text = batch["Sample"].to(device)
            target = batch['Class'].to(device)
            print(text.shape, target.shape)
            text, target = text.to(device), target.to(device)
            # add micro for coding training loop
            optimizer.zero_grad()
            print(text.shape)
            output, hidden = model(text.unsqueeze(1))
            #print(output.shape, target.shape, target.view(-1).shape)
            loss = criterion(output, target.view(-1))
            loss.backward()
            optimizer.step()
            steps += 1
            running_loss += loss.item()
            # get accuracy
            _, predicted = torch.max(output, 1)
            print(predicted)
            #predicted = torch.round(output.squeeze())
            total += target.size(0)
            correct += (predicted == target).sum().item()

        train_loss.append(running_loss/len(train_loader))
        train_acc.append(correct/total)
        print(f'Epoch: {epoch + 1}, '
              f'Training Loss: {running_loss/len(train_loader):.4f}, '
              f'Training Accuracy: {100*correct/total: .2f}%')

        # evaluate on validation data
        model.eval()
        running_loss = 0.
        correct, total = 0, 0
        with torch.no_grad():
            for idx, batch in enumerate(valid_loader):
                text = batch["Sample"].to(device)
                print(type(text), text.shape)
                target = batch['Class'].to(device)
                target = torch.autograd.Variable(target).long()
                text, target = text.to(device), target.to(device)
                optimizer.zero_grad()
                output = model(text)
                loss = criterion(output, target)
                running_loss += loss.item()
                # get accuracy
                _, predicted = torch.max(output, 1)
                #predicted = torch.round(output.squeeze())
                total += target.size(0)
                correct += (predicted == target).sum().item()

        validation_loss.append(running_loss/len(valid_loader))
        validation_acc.append(correct/total)
        print(f'Validation Loss: {running_loss/len(valid_loader):.4f}, '
              f'Validation Accuracy: {100*correct/total: .2f}%')

    return train_loss, train_acc, validation_loss, validation_acc
This is how I call the training loop:
# Model hyperparameters
#vocab_size = len(word_array)
learning_rate = 1e-3
hidden_dim = 100
output_size = 11
input_size = 300
epochs = 10
n_layers = 2
# Initialize model, training and testing
set_seed(SEED)
vanilla_rnn_model = VanillaRNN(input_size, output_size, hidden_dim, n_layers)
vanilla_rnn_model.to(DEVICE)
vanilla_rnn_start_time = time.time()
vanilla_train_loss, vanilla_train_acc, vanilla_validation_loss, vanilla_validation_acc = train(vanilla_rnn_model,
                                                                                               DEVICE,
                                                                                               train_loader,
                                                                                               valid_loader,
                                                                                               epochs = epochs,
                                                                                               learning_rate = learning_rate)
This is how I create the dataloaders:
# Splitting dataset
# define a batch_size; I'll use 1 here
batch_size = 1
train_dset = CustomDataset(X2, y) # create data set
train_loader = DataLoader(train_dset, batch_size=batch_size, shuffle=True) #load data with batch size
valid_dset = CustomDataset(X2, y)
valid_loader = DataLoader(valid_dset, batch_size=batch_size, shuffle=True)
g_seed = torch.Generator()
g_seed.manual_seed(SEED)
Full traceback:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-23-bfd2f8f3456f> in <module>()
19 valid_loader,
20 epochs = epochs,
---> 21 learning_rate = learning_rate)
22 print("--- Time taken to train = %s seconds ---" % (time.time() - vanilla_rnn_start_time))
23 #test_accuracy = test(vanilla_rnn_model, DEVICE, test_iter)
3 frames
<ipython-input-22-16748701034f> in train(model, device, train_loader, valid_loader, epochs, learning_rate)
47 output, hidden = model(text.unsqueeze(1))
48 #print(output.shape, target.shape, target.view(-1).shape)
---> 49 loss = criterion(output, target.view(-1))
50 loss.backward()
51 optimizer.step()
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1049 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1050 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1051 return forward_call(*input, **kwargs)
1052 # Do not call functions when jit is used
1053 full_backward_hooks, non_full_backward_hooks = [], []
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/loss.py in forward(self, input, target)
1119 def forward(self, input: Tensor, target: Tensor) -> Tensor:
1120 return F.cross_entropy(input, target, weight=self.weight,
-> 1121 ignore_index=self.ignore_index, reduction=self.reduction)
1122
1123
/usr/local/lib/python3.7/dist-packages/torch/nn/functional.py in cross_entropy(input, target, weight, size_average, ignore_index, reduce, reduction)
2822 if size_average is not None or reduce is not None:
2823 reduction = _Reduction.legacy_get_string(size_average, reduce)
-> 2824 return torch._C._nn.cross_entropy_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index)
2825
2826
ValueError: Expected input batch_size (1) to match target batch_size (11).
Solution
You should not be using .view(-1). This line:
loss = criterion(output, target.view(-1))
should be:
loss = criterion(output, target)
The .view(-1) is effectively removing your batch dimension by flattening the target. For batch_size=1, it changes the target's shape from (1, 11) to (11,). When you changed batch_size to 11, the view changed the shape from (11, 11) to (121,), hence the second error.
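To make the shape arithmetic concrete, here is a minimal sketch using the shapes implied by the question (stand-in tensors with 11 classes, not the actual data):
import torch
import torch.nn as nn

criterion = nn.CrossEntropyLoss()

# Shapes implied by the question with batch_size=1 and 11 classes (stand-in tensors).
output = torch.randn(1, 11)     # model logits: (batch, num_classes)
target = torch.zeros(1, 11)     # target as it comes out of the DataLoader

print(target.view(-1).shape)    # torch.Size([11]) -- the batch dim is folded into the class dim
# criterion(output, target.view(-1))
# -> ValueError: Expected input batch_size (1) to match target batch_size (11).

# With batch_size=11, the same flattening turns an (11, 11) target into (121,):
print(torch.zeros(11, 11).view(-1).shape)   # torch.Size([121])

# Dropping the .view(-1) keeps the first (batch) dimensions of output and target aligned.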
Answered By - Berriel