Issue
I am new to deep learning with PyTorch and am trying to build a binary classifier model. I have tried some of the solutions here on Stack Overflow, but I can't seem to solve it; maybe it is due to the nature of my code. Can someone figure out what could be the cause of this error in my code? Here is my code:
import torch
import torch.nn as nn
import numpy as np
from sklearn.datasets import make_blobs
import matplotlib.pyplot as pyp
# creating a dummy dataset from the make_blobs dataset
number_of_samples=5000
#divide the dataset into training(80%) and testing(20%)
training_number=int(number_of_samples*0.8)
#creating the dummy dataset
x,y=make_blobs(n_samples=number_of_samples,centers=2,n_features=64,cluster_std=10,random_state=2020)
y=y.reshape(-1,1)
#converting the numpy arrays into torch tensors
x,y=torch.from_numpy(x),torch.from_numpy(y)
x,y=x.float(),y.float()
#splitting the datasets into training and testing
x_train,x_test=x[:training_number],x[training_number:]
y_train,y_test=y[:training_number],y[training_number:]
#printing the shapes of each dataset
print("x_train shape:",x_train.shape)
print("x_test shape:",x_test.shape)
print("y_train shape:",y_train.shape)
print("y_test shape:",y_test.shape)
#a class to define the neural network using the torch nn module
#neural network will have 3 hidden layers and 1 output layer
#hidden layers will have 64,256 and 1024 neurons
#output layer will have a single neuron
class neuralnetwork(nn.Module):
    def _init_(self):
        super().__init__()
        torch.manual_seed(2020)
        self.fc1 = nn.Linear(64, 256)
        self.relu1 = nn.ReLU()
        self.fc2 = nn.Linear(256, 1024)
        self.relu2 = nn.ReLU()
        self.out = nn.Linear(1024, 1)
        self.final = nn.Sigmoid()
    def forward(self, x):
        op = self.fc1(x)
        op = self.relu1(op)
        op = self.fc2(op)
        op = self.out(op)
        y = self.final(op)
        return y
#defining the loss,optimizer and training function for the neural network
def train_network(model,optimizer,loss_function,num_epochs,batch_size,x_train,y_train):
    #start model training
    model.train()
    loss_for_every_epoch=nn.ModuleList()
    for epoch in range(num_epochs):
        train_loss=0.0
        for i in range(0,x_train.shape[0],batch_size):
            #extract train batch from x and y
            input_data=x_train[i:min(x_train.shape[0]),i+batch_size]
            labels=y_train[i:min(y_train.shape[0]),i+batch_size]
            #set gradients to zero before beginning optimization
            optimizer.zero_grad()
            #forward pass
            output_data=model(input_data)
            #calculate loss
            loss=loss_function(output_data,labels)
            #backpropagate
            loss.backward()
            #update weights
            optimizer.step()
            train_loss+=loss.item()*batch_size
        print("Epoch: {} - Loss:{:.4f}".format(epoch+1,train_loss))
        loss_for_every_epoch.extend([train_loss])
    #predict
    y_test_prediction=model(x_test)
    a=np.where(y_test_prediction>0.5,1,0)
    return loss_for_every_epoch
#create an object of the class
model=neuralnetwork()
#define the loss function
loss_function = nn.BCELoss()#binary cross entropy loss function
#define optimizer
adam_optimizer=torch.optim.Adam(params=model.parameters(),lr=0.001)
#define epochs and batch size
number_of_epochs=100
batch_size=16
#Calling the function for training and pass model, optimizer, loss and related parameters
adam_loss=train_network(model,adam_optimizer,loss_function,number_of_epochs,batch_size,x_train,y_train)
I get the error:
ValueError: optimizer got an empty parameter list
The error is mainly generated by this section of the code:
#create an object of the class
model=neuralnetwork()
#define the loss function
loss_function = nn.BCELoss()#binary cross entropy loss function
#define optimizer
adam_optimizer=torch.optim.Adam(params=model.parameters(),lr=0.001)
#define epochs and batch size
number_of_epochs=100
batch_size=16
#Calling the function for training and pass model, optimizer, loss and related parameters
adam_loss=train_network(model,adam_optimizer,loss_function,number_of_epochs,batch_size,x_train,y_train)
What could be the cause in my code? Here is the full stack trace:
Traceback (most recent call last)
g:\My Drive\CODE\pythondatascience\simpleneuralnetwork.ipynb Cell 7' in <cell line: 6>()
4 loss_function = nn.BCELoss()#binary cross entropy loss function
5 #define optimizer
----> 6 adam_optimizer=torch.optim.Adam(params=model.parameters(),lr=0.001)
7 #define epochs and batch size
8 number_of_epochs=100
File c:\Users\DAVE\AppData\Local\Programs\Python\Python310\lib\site-packages\torch\optim\adam.py:81, in Adam.__init__(self, params, lr, betas, eps, weight_decay, amsgrad, maximize)
78 raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
79 defaults = dict(lr=lr, betas=betas, eps=eps,
80 weight_decay=weight_decay, amsgrad=amsgrad, maximize=maximize)
---> 81 super(Adam, self).__init__(params, defaults)
File c:\Users\DAVE\AppData\Local\Programs\Python\Python310\lib\site-packages\torch\optim\optimizer.py:49, in Optimizer.__init__(self, params, defaults)
47 param_groups = list(params)
48 if len(param_groups) == 0:
---> 49 raise ValueError("optimizer got an empty parameter list")
50 if not isinstance(param_groups[0], dict):
51 param_groups = [{'params': param_groups}]
ValueError: optimizer got an empty parameter list
Solution
It should be def __init__, not def _init_, in the neuralnetwork class. You are not initializing your model object at all. Thus, it does not have any parameters.
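For reference, a sketch of the corrected class (only the constructor's name changes; the layers are exactly as posted):

import torch
import torch.nn as nn

class neuralnetwork(nn.Module):
    def __init__(self):  # double underscores, so this overrides nn.Module.__init__
        super().__init__()
        torch.manual_seed(2020)
        self.fc1 = nn.Linear(64, 256)
        self.relu1 = nn.ReLU()
        self.fc2 = nn.Linear(256, 1024)
        self.relu2 = nn.ReLU()
        self.out = nn.Linear(1024, 1)
        self.final = nn.Sigmoid()

With _init_, Python just defines an ordinary method with that name and the class falls back to nn.Module's inherited __init__, so none of the Linear layers are ever created. model.parameters() therefore yields nothing, and Adam raises the ValueError you saw.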
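Separately, once the model initializes, training will likely hit a second problem: the batch slicing x_train[i:min(x_train.shape[0]),i+batch_size] has a misplaced parenthesis (min() is called with a single int, and the comma turns it into 2-D indexing). It was presumably meant to be:

input_data = x_train[i:min(i + batch_size, x_train.shape[0])]
labels = y_train[i:min(i + batch_size, y_train.shape[0])]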
Answered By - The Exile