Why is there inconsistency in backward propagation (PyTorch)?
import torch

class MyLayer(torch.nn.Module):
    def __init__(self):
        super().__init__()
        mytensor = torch.empty(3, 3)
        torch.nn.init.uniform_(mytensor, 0, 1)
        # Register the tensor as a learnable parameter with nn.Parameter().
        # By default, nn.Parameter sets requires_grad to True.
        self.mytensor = torch.nn.Parameter(mytensor)

    def forward(self, input):
        # Use operations provided by PyTorch here;
        # they support autograd, i.e. automatic gradient computation.
        return input * self.mytensor
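For reference, a minimal usage sketch of this layer; the input values and the shapes chosen below are assumptions for illustration, not part of the original snippet:

    # Assumed usage sketch: the registered parameter receives a gradient.
    layer = MyLayer()
    x = torch.ones(3, 3)
    out = layer(x).sum()
    out.backward()
    # Because self.mytensor was wrapped in nn.Parameter, it appears in
    # layer.parameters() and gets a gradient during backward().
    print(layer.mytensor.grad)       # same shape as the parameter, here all ones
    print(list(layer.parameters()))  # contains the registered parameter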
import torch

class RoundGradient(torch.autograd.Function):
    # Straight-through estimator: forward rounds the input, but backward
    # passes the incoming gradient through unchanged, since the true
    # derivative of round() is zero almost everywhere.
    @staticmethod
    def forward(ctx, x):
        return x.round()

    @staticmethod
    def backward(ctx, g):
        return g

class ClampGradient(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x):
        ctx.save_for_backward(x)
        return x.clamp(min=0, max=1)

    @staticmethod
    def backward(ctx, g):
        x, = ctx.saved_tensors
        grad_input = g.clone()
        # The clamp is flat outside [0, 1], so its gradient there is zero.
        grad_input[x < 0] = 0
        grad_input[x > 1] = 0
        return grad_input
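A short sketch of how these custom functions might be exercised; the input tensor and the printed values below are assumptions added for illustration:

    # Assumed usage sketch: apply the custom Functions and inspect gradients.
    x = torch.tensor([-0.5, 0.2, 0.7, 1.5], requires_grad=True)

    y = RoundGradient.apply(x)
    y.sum().backward()
    # The analytic derivative of round() is zero almost everywhere, yet the
    # straight-through backward returns the upstream gradient unchanged:
    print(x.grad)   # tensor([1., 1., 1., 1.])

    x.grad = None
    z = ClampGradient.apply(x)
    z.sum().backward()
    # The gradient passes through inside [0, 1] and is zeroed outside it:
    print(x.grad)   # tensor([0., 1., 1., 0.])

This is the "inconsistency" in question: the backward pass deliberately does not match the exact derivative of the forward pass, which would otherwise be zero and block learning.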