Issue
I recently ran into the following error: ValueError: too many values to unpack (expected 2). Here is my code:
import os
import natsort
from PIL import Image
import torchvision
import torch
import torch.optim as optim
from torchvision import transforms, models
from torch.utils.data import DataLoader, Dataset
import torch.nn as nn
import torch.nn.functional as F
root_dir = './images/'
class Col(Dataset):
    def __init__(self, main_dir, transform):
        self.main_dir = main_dir
        self.transform = transform
        all_images = self.all_img(main_dir=main_dir)
        self.total_imges = natsort.natsorted(all_images)

    def __len__(self):
        return len(self.total_imges)

    def __getitem__(self, idx):
        img_loc = os.path.join(self.total_imges[idx])
        image = Image.open(img_loc).convert("RGB")
        tensor_image = self.transform(image)
        return tensor_image

    def all_img(self, main_dir):
        img = []
        for path, subdirs, files in os.walk(main_dir):
            for name in files:
                img.append(os.path.join(path, name))
        return img
model = models.resnet18(pretrained=False)
model.fc = nn.Sequential(nn.Linear(model.fc.in_features, 256),
                         nn.ReLU(),
                         nn.Dropout(p=0.3),
                         nn.Linear(256, 100),
                         nn.ReLU(),
                         nn.Dropout(p=0.4),
                         nn.Linear(100, 9))
# model.load_state_dict(torch.load('model.pth'))
for name, param in model.named_parameters():
    if "bn" not in name:
        param.requires_grad = False
transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5457, 0.5457, 0.5457], std=[0.2342, 0.2342, 0.2342])
])
data = Col(main_dir=root_dir, transform=transform)
dataset = torch.utils.data.DataLoader(data, batch_size=130)
train_set, validate_set= torch.utils.data.random_split(dataset, [round(len(dataset)*0.7), (len(dataset) - round(len(dataset)*0.7))])
if torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")
model.to(device)
def train(model, optimizer, loss_fn, train_set, validate_set, epochs=20, device="cpu"):
    for epoch in range(1, epochs+1):
        training_loss = 0.0
        valid_loss = 0.0
        model.train()
        for batch in train_set:
            optimizer.zero_grad()
            inputs, targets = batch
            inputs = inputs.to(device)
            targets = targets.to(device)
            output = model(inputs)
            loss = loss_fn(output, targets)
            loss.backward()
            optimizer.step()
            training_loss += loss.data.item() * inputs.size(0)
        training_loss /= len(train_set.dataset)

        model.eval()
        num_correct = 0
        num_examples = 0
        for batch in validate_set:
            inputs, targets = batch
            inputs = inputs.to(device)
            output = model(inputs)
            targets = targets.to(device)
            loss = loss_fn(output, targets)
            valid_loss += loss.data.item() * inputs.size(0)
            correct = torch.eq(torch.max(F.softmax(output, dim=1), dim=1)[1], targets)
            num_correct += torch.sum(correct).item()
            num_examples += correct.shape[0]
        valid_loss /= len(validate_set.dataset)

        print('Epoch: {}, Training Loss: {:.2f}, Validation Loss: {:.2f}, accuracy = {:.2f}'.format(
            epoch, training_loss, valid_loss, num_correct / num_examples))
optimizer = optim.Adam(model.parameters(), lr=0.0001)
But the call to this function
train(model, optimizer,torch.nn.CrossEntropyLoss(), train_set.dataset, validate_set.dataset, epochs=100, device=device)
gives this error
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/tmp/ipykernel_4828/634509595.py in <module>
----> 1 train(model, optimizer,torch.nn.CrossEntropyLoss(), train_set.dataset, validate_set.dataset, epochs=100, device=device)
/tmp/ipykernel_4828/1473922939.py in train(model, optimizer, loss_fn, train_set, validate_set, epochs, device)
6 for batch in train_set:
7 optimizer.zero_grad()
----> 8 inputs, targets = batch
9 inputs = inputs.to(device)
10 targets = targets.to(device)
ValueError: too many values to unpack (expected 2)
Solution
The batch doesn't contain both the inputs and the targets. The problem is simply that __getitem__ returns only tensor_image (which is presumably the inputs) and not whatever the targets should be.
This means that the line:
inputs, targets = batch
must become:
inputs = batch
since __getitem__ has a single return value (return tensor_image), so each batch from the train_set is just one tensor rather than an (inputs, targets) pair.
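As a minimal sketch (reusing the Col dataset, root_dir, and transform defined above; the loader and variable names here are just for illustration), pulling a single batch shows that it is one image tensor rather than a pair, which is exactly why the two-way unpacking fails:
check_loader = DataLoader(Col(main_dir=root_dir, transform=transform), batch_size=4)
batch = next(iter(check_loader))   # one batch from the loader
print(type(batch))                 # <class 'torch.Tensor'>
print(batch.shape)                 # torch.Size([4, 3, 224, 224])
inputs = batch                     # single-value assignment works
# inputs, targets = batch          # ValueError: unpacking iterates the batch
#                                  # dimension (4 items here, 130 in the
#                                  # original loader) instead of an (x, y) pair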
Answered By - Sam H