Tags: python, python-3.x, cntk

Trouble implementing custom Negative Log-Likelihood loss in pet example


I am using a pet example to optimize the parameters of a dense layer that is connected to a softplus and outputs the parameters of a negative binomial distribution. My process has been:

1) Build a custom class for the negative log-likelihood loss with forward and backward methods. It takes the distribution parameters and a target as input and outputs the loss, i.e. the negative log-likelihood of the modeled distribution evaluated at the target (the underlying formula is spelled out after the class):

from scipy.special import gamma, digamma, binom
from cntk.ops.functions import UserFunction
from cntk import output_variable  # needed by infer_outputs below
import numpy as np

class NegBin_Loss_Deep_NoSP(UserFunction):
    def __init__(self, dis_params, target, name='NegBin_Loss_Deep_NoSP'):
        super(NegBin_Loss_Deep_NoSP, self).__init__([dis_params, target], name=name)

    def forward(self, arguments, device=None, outputs_to_retain=None):
        # arguments[0] holds the distribution parameters [miu, alpha],
        # arguments[1] holds the target
        self.miu = arguments[0][0][0]
        self.alpha = arguments[0][0][1]
        target = arguments[1][0]
        #Compute the likelihood of the target
        LL = binom(target+(1/self.alpha)-1,target)*((1/(1+self.alpha*self.miu))**(1/self.alpha))*(((self.alpha*self.miu)/(1+self.alpha*self.miu))**target)
        log_ll = np.log(LL)
        # Loss is the negative log likelihood; the target is returned as the
        # state so that backward can reuse it
        return target, -log_ll

    def backward(self, state, root_gradients, variables):
        target = state
        for idx in range(len(self.inputs)):
            if self.inputs[idx] in variables:
                gradient = np.array([-(target-self.miu)/(self.alpha*(self.miu**2)+self.miu), # miu derivative
                            -((self.miu*self.alpha+1)*np.log(self.miu*self.alpha+1)+(-self.miu*digamma((1/self.alpha)+target)+self.miu*digamma(1/self.alpha)+target-self.miu)*self.alpha-digamma((1/self.alpha)+target)+digamma(1/self.alpha))/(self.miu*(self.alpha**3)+(self.alpha**2))])[:,0] # alpha derivative
                variables[self.inputs[idx]] = gradient
        return root_gradients*gradient

    def infer_outputs(self):
        return [output_variable(self.inputs[1].shape, self.inputs[0].dtype, self.inputs[1].dynamic_axes)]

    @staticmethod
    def deserialize(inputs, name, state):
        f = NegBin_Loss_Deep_NoSP(inputs[0], inputs[1], name)
        return f
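
For reference (my reading of the code above), the quantity returned by forward is the negative log of the NB2 negative-binomial pmf with mean miu and dispersion alpha:

$$-\log L(y;\mu,\alpha) \;=\; -\log\binom{y+\tfrac{1}{\alpha}-1}{y} \;+\; \tfrac{1}{\alpha}\log(1+\alpha\mu) \;-\; y\log\frac{\alpha\mu}{1+\alpha\mu}$$

The first entry of the gradient in backward is its derivative with respect to miu, which simplifies to (miu - y) / (alpha*miu^2 + miu); the second entry is the (longer) derivative with respect to alpha.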

2) Numerically test the forward and backward methods with a custom script: instantiate the class above, pass in an input array, and use forward and backward to update that array while checking that the loss decreases. This works as intended. (An additional finite-difference check of the gradients is sketched after the script.)

import cntk as C  # the input variables below use the cntk namespace

def grad_clipper(grad_v, trunc_max=0.005, trunc_min=-0.005):
    if trunc_min >= trunc_max:
        raise Exception("lower clipping bound has to be lower than upper bound")
    for idx, value in enumerate(grad_v):
        grad_v[idx] = max(min(value,trunc_max),trunc_min)
    return grad_v


def convergence_test():
    iter_num = 500000
    noise_magnitude = 0.15
    target = np.array([3.]) # [0.5, 0]
    params = [5, 0.5]
    v = C.input_variable(shape=(2,), needs_gradient=True)
    t = C.input_variable(shape=(1,))
    lr = 0.0005
    a = NegBin_Loss_Deep_NoSP(v,t)
    for i in range(iter_num):
        target_new = (1+(0.5-np.random.rand())*noise_magnitude)*target
        state, loss = a.forward([[params],target_new], [a.output], set([a.output]))
        if i%1000 == 0:
            print("loss", loss)
        grad = grad_clipper(a.backward(state, np.ones_like(params), set([v])))
        if i%1000 == 0:
            print("params:", params, "gradient:", grad)
        params = np.sum([params,[l * lr for l in grad]],axis=0)
        params[1] = max(params[1], 5e-6)

convergence_test()  
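
As an extra sanity check (not in the original post), the two analytic derivative expressions used in backward can be compared against central finite differences of the same likelihood formula used in forward. A self-contained sketch, independent of the CNTK machinery; the helper names are mine:

import numpy as np
from scipy.special import binom, digamma

def neg_log_lik(miu, alpha, y):
    # same expression as in forward
    LL = binom(y + (1/alpha) - 1, y) * ((1/(1 + alpha*miu))**(1/alpha)) * (((alpha*miu)/(1 + alpha*miu))**y)
    return -np.log(LL)

def analytic_grad(miu, alpha, y):
    # same expressions as in backward
    d_miu = -(y - miu) / (alpha*miu**2 + miu)
    d_alpha = -((miu*alpha + 1)*np.log(miu*alpha + 1)
                + (-miu*digamma(1/alpha + y) + miu*digamma(1/alpha) + y - miu)*alpha
                - digamma(1/alpha + y) + digamma(1/alpha)) / (miu*alpha**3 + alpha**2)
    return np.array([d_miu, d_alpha])

miu, alpha, y, eps = 5.0, 0.5, 3.0, 1e-6
numeric = np.array([
    (neg_log_lik(miu + eps, alpha, y) - neg_log_lik(miu - eps, alpha, y)) / (2*eps),
    (neg_log_lik(miu, alpha + eps, y) - neg_log_lik(miu, alpha - eps, y)) / (2*eps),
])
print("analytic:", analytic_grad(miu, alpha, y))
print("numeric: ", numeric)

The two should agree to several decimal places; a mismatch would point to an error in the derivative expressions rather than in the CNTK wiring.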

3) Build the pet-example model and optimize the distribution parameters with CNTK's learner. I can't get the parameters of the dense layer to update (a small check of this is sketched after the code); what am I missing?

import time
import cntk as C
from cntk.layers import *

# Simple model to experiment gradient descent on the weights of the dense layer 
def model0(x):    
    with default_options(initial_state = 0.1, enable_self_stabilization=True,init=glorot_uniform()):
        m = Dense(2, activation = None)(x)
        m = C.ops.softplus(m)
    return m

# input sequences
x = C.input_variable(shape=(2,))
t = C.input_variable(shape=(1,))

# create the model
m0 = model0(x)

# define the learning rate
learning_rate = 0.02
lr_schedule = C.learning_parameter_schedule(learning_rate)

# define loss and error function
loss = NegBin_Loss_Deep_NoSP(m0.output, t)
error = NegBin_Loss_Deep_NoSP(m0.output, t)

# use fsadagrad optimizer
momentum_schedule = C.momentum_schedule(0.9)
learner = C.fsadagrad(m0.parameters,
                      lr = lr_schedule,
                      momentum = momentum_schedule,
                      unit_gain = True)

trainer = C.Trainer(m0, (loss, error), [learner])

loss_summary = []
start = time.time()
x1 = np.array([3,0.002], dtype=np.float32)
y1 = np.array([2.5], dtype=np.float32)
for epoch in range(0, 10):
    trainer.train_minibatch({x: x1, t: y1})
    if epoch % (10 / 10) == 0:
        training_loss = trainer.previous_minibatch_loss_average
        loss_summary.append(training_loss)
        print("epoch: {}, loss: {:.5f}".format(epoch, training_loss))

4) Test the pet example with a native CNTK loss function, as suggested by snowflake. By adapting the stub from 3), I set up a similar test with squared error. I can confirm that this way the model updates its parameters and the loss decreases with training. I believe this narrows the problem down to the custom loss function from 1).

x = C.input_variable(shape=(2,))
t = C.input_variable(shape=(2,)) # -> Changed in 4)

# create the model
m0 = model0(x)

# the learning rate
learning_rate = 0.02
lr_schedule = C.learning_parameter_schedule(learning_rate)

# loss function
loss = C.squared_error(m0.output, t) # -> Changed in 4)
error = C.squared_error(m0.output, t) # -> Changed in 4)

# use fsadagrad optimizer
momentum_schedule = C.momentum_schedule(0.9)
learner = C.fsadagrad(m0.parameters,
                      lr = lr_schedule,
                      momentum = momentum_schedule,
                      unit_gain = True)

trainer = C.Trainer(m0, (loss, error), [learner])

loss_summary = []
start = time.time()
x1 = np.array([3, 0.002], dtype=np.float32)
y1 = np.array([2.5, 0.005], dtype=np.float32) # -> Changed in 4)
for epoch in range(0, 10):
    trainer.train_minibatch({x: x1, t: y1})
    if epoch % (10 / 10) == 0:
        training_loss = trainer.previous_minibatch_loss_average
        loss_summary.append(training_loss)
        print("epoch: {}, loss: {:.5f}".format(epoch, training_loss))

What in the loss function from 1) is preventing my model from training?

Thank you


EDIT: Solved, thanks snowflake!


Solution

  • Given that your loss function takes multiple inputs, variables will be a dictionary mapping each input variable to its gradient.

    Doing variables = gradient rebinds the name and overwrites the dictionary, so the gradients never reach CNTK.

    You should instead be doing something like variables[var] = ... # compute the gradient for var (see the sketch below).
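
For completeness, a minimal sketch of backward rewritten along the lines of this advice (the analytic expressions are copied unchanged from 1); the only changes are that the gradient is written into the variables dictionary, scaled by root_gradients, and nothing is returned):

    def backward(self, state, root_gradients, variables):
        target = state
        for idx in range(len(self.inputs)):
            if self.inputs[idx] in variables:
                gradient = np.array([-(target-self.miu)/(self.alpha*(self.miu**2)+self.miu), # miu derivative
                            -((self.miu*self.alpha+1)*np.log(self.miu*self.alpha+1)+(-self.miu*digamma((1/self.alpha)+target)+self.miu*digamma(1/self.alpha)+target-self.miu)*self.alpha-digamma((1/self.alpha)+target)+digamma(1/self.alpha))/(self.miu*(self.alpha**3)+(self.alpha**2))])[:,0] # alpha derivative
                # populate the entry for this variable instead of rebinding
                # the name `variables` or returning the gradient
                variables[self.inputs[idx]] = root_gradients * gradient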