Issue
import torch
import torch.nn as nn

class Parent(nn.Module):
    def __init__(self, in_features, z_dim, img_dim):
        super().__init__()
        self.my_child1 = Child1(z_dim, img_dim)
        self.my_child2 = Child2(in_features)

    def forward(self, input):
        # Full pass: Child1 first, then Child2 on its output.
        input = self.my_child1(input)
        input = self.my_child2(input)
        return input

    def forward1(self, input):
        # Pass through Child1 only.
        input = self.my_child1(input)
        return input

    def forward2(self, input):
        # Pass through Child2 only.
        input = self.my_child2(input)
        return input
class Child2(nn.Module):
    def __init__(self, in_features):
        super().__init__()
        self.child2 = nn.Sequential(
            nn.Linear(in_features, 128),
            nn.LeakyReLU(0.01),
            nn.Linear(128, 1),
            nn.Sigmoid(),
        )

    def forward(self, x):
        return self.child2(x)
class Child1(nn.Module):
    def __init__(self, z_dim, img_dim):
        super().__init__()
        self.child1 = nn.Sequential(
            nn.Linear(z_dim, 256),
            nn.LeakyReLU(0.01),
            nn.Linear(256, img_dim),
            nn.Tanh(),
        )

    def forward(self, x):
        return self.child1(x)
# Example sizes so the snippet runs; the actual values are not shown in the question.
batch_size, z_dim, img_dim, in_features = 32, 64, 784, 784  # in_features must match img_dim
device = "cuda" if torch.cuda.is_available() else "cpu"

criterion = nn.BCELoss()
noise = torch.randn(batch_size, z_dim).to(device)
model = Parent(in_features, z_dim, img_dim).to(device)  # move the model to the same device as the input
output1 = model(noise)
loss1 = criterion(output1, torch.ones_like(output1))
loss2 = criterion(output1, torch.zeros_like(output1))  # fixed typo: torch.zeroes_like does not exist
loss3 = (loss1 + loss2) / 2
model.zero_grad()
loss3.backward(retain_graph=True)
print(loss3.grad)  # prints None (see below)
I have not used an optimizer here because the parameters are updated with a separate formula, which I will apply only once I have the gradients. That formula requires the gradients to be stored in a matrix. However, the gradient always prints "None".
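The symptom can be reproduced with a minimal standalone sketch, assuming nothing beyond stock PyTorch: after backward(), autograd populates .grad only on leaf tensors, not on intermediate results such as a loss.

import torch

x = torch.tensor([1.0, 2.0], requires_grad=True)  # leaf tensor
y = (x ** 2).sum()                                # intermediate (non-leaf) result
y.backward()
print(x.grad)  # tensor([2., 4.]) - gradients land on the leaf
print(y.grad)  # None (with a UserWarning) - non-leaf tensors do not retain .grad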
Solution
loss3.grad is None because autograd populates the .grad attribute only on leaf tensors, such as the model's parameters; loss3 is an intermediate result of the graph. After calling backward(), you can get the computed gradient for every parameter in your model with:
gradient = [el.grad for el in model.parameters()]
print(gradient)
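If you then need those gradients in a single matrix for your update formula, a minimal sketch could look like the following; it assumes the model from the question, and lr together with the plain gradient-descent step are placeholders for whatever formula you intend to use:

import torch

# Stack every parameter's gradient into one flat vector ("matrix" of gradients);
# reshape(-1) flattens tensors of different shapes so they can be concatenated.
gradient_vector = torch.cat([p.grad.reshape(-1) for p in model.parameters()])

# Manual update without an optimizer; plain gradient descent stands in
# for the custom formula mentioned in the question.
lr = 0.01  # placeholder step size
with torch.no_grad():
    for p in model.parameters():
        p -= lr * p.grad

The torch.no_grad() context is needed so the in-place parameter update is not itself recorded in the autograd graph.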
Answered By - Deusy94