Tags: python, deep-learning, pytorch, computer-vision

Grad-CAM always placing the heatmap in the same area


Here is the part of my code relevant to the issue:

def forward_hook(module, input, output):
    activation.append(output)

def backward_hook(module, grad_in, grad_out):
    grad.append(grad_out[0])


model.layer4[-1].register_forward_hook(forward_hook)
model.layer4[-1].register_backward_hook(backward_hook)
grad=[]
activation=[]


loader_iter = iter(dataloader_test)
for _ in range(50):
    data, target, meta = next(loader_iter)       
    count1 = 0
    for d, t, m in zip(data, target, meta):

        hm_dogs = []
        heatmap = []
        d, t = map(lambda x: x.to(device), (d, t))
        
        #remove batch size
        d = d.unsqueeze(0)
        output = model(d)

        output[:, 4].backward()
        #get the gradients and activations collected in the hook
        grads = grad[count1].cpu().data.numpy().squeeze()
        fmap = activation[count1].cpu().data.numpy().squeeze()

I printed the grads and they all look the same regardless of the iteration. Does anyone have any ideas?


Solution

  • It looks like you are accumulating the gradients and activations across every iteration of the loop: the hook lists are never emptied, and since count1 restarts at 0 for each batch, grad[count1] and activation[count1] keep re-reading the entries recorded during the very first batch. Clear the grad and activation lists at the start of each outer iteration, right before the inner loop:

    loader_iter = iter(dataloader_test)
    for _ in range(50):
        grad.clear()
        activation.clear()
    
        data, target, meta = next(loader_iter)
        count1 = 0
        for d, t, m in zip(data, target, meta):
            hm_dogs = []
            heatmap = []
            d, t = map(lambda x: x.to(device), (d, t))
    
            # remove batch size
            d = d.unsqueeze(0)
            output = model(d)
    
            output[:, 4].backward()
            # get the gradients and activations collected in the hook
            grads = grad[count1].cpu().data.numpy().squeeze()
            fmap = activation[count1].cpu().data.numpy().squeeze()
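
  • Once the lists are reset, grads and fmap hold the values for the current image, and the usual Grad-CAM recipe from there is to global-average-pool the gradients into per-channel weights, take the weighted sum of the feature maps, and pass the result through a ReLU. A minimal sketch of that last step, assuming grads and fmap are (C, H, W) NumPy arrays as produced by the squeeze() calls above:

    import numpy as np

    # per-channel weights: average each gradient map over its spatial dimensions
    weights = grads.mean(axis=(1, 2))                      # shape (C,)
    # weighted sum of the feature maps, one weight per channel
    cam = (weights[:, None, None] * fmap).sum(axis=0)      # shape (H, W)
    # keep only the positive contributions and normalize to [0, 1]
    cam = np.maximum(cam, 0)
    cam = (cam - cam.min()) / (cam.max() - cam.min() + 1e-8)
    # cam can now be resized to the input resolution and overlaid on the image as a heatmap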