Search code examples
pythonnumpymachine-learninggradient-descent

python batch gradient descent does not converge


I have increased and decreased the learning rate and doesn't seem to converge or takes forever. if I set the learning rate to 0.0004 it slowly tries to converge but requires so many iteration I've had to set over 1mil+ iteration and only managed to go from 93 least squared error to 58

I am following Andrews NG forumla

Image of the graph with the gradient line:

image of the graph with the gradient line

my code:

import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import matplotlib.patches as mpatches
import time


data = pd.read_csv('weight-height.csv')
x = np.array(data['Height'])
y = np.array(data['Weight'])


plt.scatter(x, y, c='blue')
plt.suptitle('Male')
plt.xlabel('Height')
plt.ylabel('Weight')
total = mpatches.Patch(color='blue', label='Total amount of data {}'.format(len(x)))
plt.legend(handles=[total])

theta0 = 0
theta1 = 0
learning_rate = 0.0004
epochs = 10000


# gradient = theta0 + theta1*X


def hypothesis(x):
    return theta0 + theta1 * x


def cost_function(x):
    return 1 / (2 * len(x)) * sum((hypothesis(x) - y) ** 2)

start = time.time()

for i in range(epochs):
    print(f'{i}/ {epochs}')
    theta0 = theta0 - learning_rate * 1/len(x) * sum (hypothesis(x) - y)
    theta1 = theta1 - learning_rate * 1/len(x) * sum((hypothesis(x) - y) * x)
    print('\ncost: {}\ntheta0: {},\ntheta1: {}'.format(cost_function(x), theta0, theta1))

end = time.time()

plt.plot(x, hypothesis(x), c= 'red')


print('\ncost: {}\ntheta0: {},\ntheta1: {}'.format(cost_function(x), theta0, theta1))

print('time finished at {} seconds'.format(end - start))

plt.show()

Solution

  • Your problem might be that you are updating theta0 and theta1 one by one:

    theta0 = theta0 - learning_rate * 1/len(x) * sum (hypothesis(x) - y)
    # the update to theta1 is now using the updated version of theta0
    theta1 = theta1 - learning_rate * 1/len(x) * sum((hypothesis(x) - y) * x)
    

    it would be better to re-write such that the 'hypothesis' function is called once and explicitly pass it the values of theta0 and theta1 to use, rather than using global values.

    # modify to explicitly pass theta0/1
    def hypothesis(x, theta0, theta1):
        return theta0 + theta1 * x
    
    # explicitly pass y
    def cost_function(x, y, theta0, theta1):
        return 1 / (2 * len(x)) * sum((hypothesis(x, theta0, theta1) - y) ** 2)
    
    for i in range(epochs):
        print(f'{i}/ {epochs}')
        # calculate hypothesis once
        delta = hypothesis(x, theta0, theta1)
        theta0 = theta0 - learning_rate * 1/len(x) * sum (delta - y)
        theta1 = theta1 - learning_rate * 1/len(x) * sum((delta - y) * x)
        print('\ncost: {}\ntheta0: {},\ntheta1: {}'.format(cost_function(x, y, theta0, theta1))