python, optimization, statistics, gradient-descent, log-likelihood

Gradient descent isn't working for maximum likelihood with logistic probability in Python


So I have been trying to run a gradient-based algorithm in Python and I am not getting a convergent result. This is what I am trying to put into code:

(Image: the logistic log-likelihood being maximized, l(b0, b1) = sum_i [ y_i*log(p_i) + (1 - y_i)*log(1 - p_i) ] with p_i = 1/(1 + exp(-b0 - b1*x_i)), together with its partial derivatives with respect to b0 and b1.)

My code follows:

# base packages
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression

x = np.array([0,0,0,0.1,0.1,0.3,0.3,0.9,0.9,0.9])
y = np.array([0.,0.,1.,0.,1.,1.,1.,0.,1.,1.])


# log-likelihood of the logistic model at (b0, b1)
def f(b0,b1,x,y):
    vec = [y[i]*np.log(1/(1+np.exp(-b0-b1*x[i]))) + (1-y[i])*np.log(1 - (1/(1+np.exp(-b0-b1*x[i])))) for i in range(len(y))]
    return sum(vec)

# partial derivative of the negative log-likelihood with respect to b0
def dervf0(b0,b1,x,y):
    vec = [-y[i] + (1/(1+np.exp(-b0-b1*x[i]))) for i in range(len(x))]
    return np.sum(vec)

# partial derivative of the negative log-likelihood with respect to b1
def dervf1(b0,b1,x,y):
    vec = [-x[i]*(y[i]-(1/(1+np.exp(-b0-b1*x[i])))) for i in range(len(x))]
    return sum(vec)



def G(f1,f2,b0,b1,x,y,tol,maxiter):
    v = np.array([b0,b1]) 
    theta_new  = v
    for i in range(maxiter):
        theta_new = v - 0.001*np.array([f1(b0,b1,x,y),f2(b0,b1,x,y)])
        if np.linalg.norm(theta_new - v) < tol: 
            break
        else:
            v = theta_new     
    return theta_new,i

The result should be the vector [-0.009, 1.263]'. However, I am not getting a convergent result. Any ideas?


Solution

  • I didn't get why f1 and f2 are passed in as arguments. The problem is that you are not using the updated parameters b0, b1 for the next iteration: you update v, but never b0 and b1. Add this at the end of every iteration:

    b0 = v[0]
    b1 = v[1]
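
    For reference, here is a minimal sketch of the original loop with just that fix applied, keeping the question's signature and step size:

    def G(f1, f2, b0, b1, x, y, tol, maxiter):
        v = np.array([b0, b1])
        theta_new = v
        for i in range(maxiter):
            theta_new = v - 0.001 * np.array([f1(b0, b1, x, y), f2(b0, b1, x, y)])
            if np.linalg.norm(theta_new - v) < tol:
                break
            v = theta_new
            b0 = v[0]  # feed the updated parameters into the next gradient evaluation
            b1 = v[1]
        return theta_new, i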
    

    Alternatively, try this vectorized implementation; vectorized code also runs faster. The final theta_new is [-0.00923525 1.26245957].

    
    import numpy as np
    
    x = np.array([0, 0, 0, 0.1, 0.1, 0.3, 0.3, 0.9, 0.9, 0.9])
    y = np.array([0., 0., 1., 0., 1., 1., 1., 0., 1., 1.])
    
    
    def f(b0, b1, x, y):
        return np.sum(
            np.multiply(y, np.log(1 / (1 + np.exp(-b0 - b1 * x)))) +
            np.multiply(1 - y, np.log(1 - (1 / (1 + np.exp(-b0 - b1 * x))))))
    
    
    def dervf0(b0, b1, x, y):
        return np.sum(-1 * y + (1 / (1 + np.exp(-b0 - b1 * x))))
    
    
    def dervf1(b0, b1, x, y):
        return np.sum(np.multiply(-1 * x, y - (1 / (1 + np.exp(-b0 - b1 * x)))))
    
    
    def G(v, x, y, tol, maxiter):
        theta_new = v
        for i in range(maxiter):
            # gradient step on the negative log-likelihood, evaluated at the current v
            theta_new = v - 0.001 * np.array(
                [dervf0(v[0], v[1], x, y),
                 dervf1(v[0], v[1], x, y)])
            if np.linalg.norm(theta_new - v) < tol:
                break
            v = theta_new  # carry the updated parameters into the next iteration
            print('i\t{}\tv\t{}\ttheta_new\t{}'.format(i, v, theta_new))
        return theta_new, i
    
    
    tol = 0.0000001
    maxiter = 1000000
    v = np.random.normal(0, 1, 2)
    theta_new, i = G(v, x, y, tol, maxiter)
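
    One way to sanity-check the result is to fit an essentially unregularized LogisticRegression from scikit-learn (which the question already imports) on the same data; with a very large C the fitted intercept and coefficient should land close to theta_new. A minimal sketch:

    from sklearn.linear_model import LogisticRegression

    # C very large ~= no L2 penalty, so the fit approximates the plain MLE
    clf = LogisticRegression(C=1e9)
    clf.fit(x.reshape(-1, 1), y)      # sklearn expects a 2-D feature matrix
    print(clf.intercept_, clf.coef_)  # should be close to [-0.009] and [[1.263]]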