I have a gradient descent function in Python that returns some values:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

def read_data(file):
    df = pd.read_excel(file)
    x_data = np.array(df['X_axis'])
    y_data = np.array(df['Y_axis'])
    return x_data, y_data

x_data, y_data = read_data('path\file')
alpha = 10**-8

# Auxiliary functions
...
Here is the function:
def gradient(x_axis, y_axis):
    current_iteration = 0
    iterations_number = 100
    # Initial values
    # I guess that the problem is here. When I print those initial
    # values, inside the function, they are different from what they should be
    A1_inicial = y_axis[0]
    A2_inicial = y_axis[-1]
    x0_inicial = np.mean(x_axis)
    dx_inicial = np.std(x_axis)
    while current_iteration < iterations_number:
        sum_A1 = 0
        sum_A2 = 0
        sum_dx = 0
        sum_x0 = 0
        for x_values, y_values in zip(x_axis, y_axis):
            sum_A1 += derivada_A1(A1_inicial, A2_inicial, x0_inicial, dx_inicial, x_values, y_values)
            sum_A2 += derivada_A2(A1_inicial, A2_inicial, x0_inicial, dx_inicial, x_values, y_values)
            sum_dx += derivada_dx(A1_inicial, A2_inicial, x0_inicial, dx_inicial, x_values, y_values)
            sum_x0 += derivada_x0(A1_inicial, A2_inicial, x0_inicial, dx_inicial, x_values, y_values)
        A1_inicial = A1_inicial - (alpha * sum_A1)
        A2_inicial = A2_inicial - (alpha * sum_A2)
        x0_inicial = x0_inicial - (alpha * sum_x0)
        dx_inicial = dx_inicial - (alpha * sum_dx)
        return A1_inicial, A2_inicial, x0_inicial, dx_inicial
        current_iteration += 1
So the problem is that whenever I use this function, the values it returns are wrong. I know they are wrong because I have the real values to compare against.
Here is how I am using the function:
new_y = []
A1_inicial, A2_inicial, x0_inicial, dx_inicial = gradient(x_axis, y_axis)
for x in x_axis:
    new_y.append(A2_inicial + ((A1_inicial - A2_inicial) / (1 + np.exp((x - x0_inicial) / dx_inicial))))
print("A1: {}".format(A1_inicial))
print("A2: {}".format(A2_inicial))
print("X0: {}".format(x0_inicial))
print("DX: {}".format(dx_inicial))
And I know that if I unwrap the function and run its body directly, like this:
current_iteration = 0
iterations_number = 100
A1_inicial = y_axis[0]
A2_inicial = y_axis[-1]
x0_inicial = np.mean(x_axis)
dx_inicial = np.std(x_axis)
while current_iteration < iterations_number:
    sum_A1 = 0
    sum_A2 = 0
    sum_dx = 0
    sum_x0 = 0
    for x_values, y_values in zip(x_axis, y_axis):
        sum_A1 += derivada_A1(A1_inicial, A2_inicial, x0_inicial, dx_inicial, x_values, y_values)
        sum_A2 += derivada_A2(A1_inicial, A2_inicial, x0_inicial, dx_inicial, x_values, y_values)
        sum_dx += derivada_dx(A1_inicial, A2_inicial, x0_inicial, dx_inicial, x_values, y_values)
        sum_x0 += derivada_x0(A1_inicial, A2_inicial, x0_inicial, dx_inicial, x_values, y_values)
    A1_inicial = A1_inicial - (alpha * sum_A1)
    A2_inicial = A2_inicial - (alpha * sum_A2)
    x0_inicial = x0_inicial - (alpha * sum_x0)
    dx_inicial = dx_inicial - (alpha * sum_dx)
    current_iteration += 1
And then:
new_y = []
for x in x_axis:
    new_y.append(A2_inicial + ((A1_inicial - A2_inicial) / (1 + np.exp((x - x0_inicial) / dx_inicial))))
print("A1: {}".format(A1_inicial))
print("A2: {}".format(A2_inicial))
print("X0: {}".format(x0_inicial))
print("DX: {}".format(dx_inicial))
It works and gives me the right values, but I don't know what is wrong with the function and why it does not work.
Thanks for the help!
In your gradient function, the return statement sits inside the while loop, before the line that increments current_iteration. The function exits at the return on the very first pass, so the increment is never reached and only one iteration of gradient descent ever runs.
Move the current_iteration increment to the end of the loop body and move the return statement to after the while loop ends, as sketched below.
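A minimal sketch of the corrected structure, keeping your variable names and assuming your derivada_* helpers and alpha behave as in your code:

def gradient(x_axis, y_axis):
    current_iteration = 0
    iterations_number = 100
    # Initial guesses, unchanged from your version
    A1_inicial = y_axis[0]
    A2_inicial = y_axis[-1]
    x0_inicial = np.mean(x_axis)
    dx_inicial = np.std(x_axis)
    while current_iteration < iterations_number:
        sum_A1 = sum_A2 = sum_dx = sum_x0 = 0
        for x_values, y_values in zip(x_axis, y_axis):
            sum_A1 += derivada_A1(A1_inicial, A2_inicial, x0_inicial, dx_inicial, x_values, y_values)
            sum_A2 += derivada_A2(A1_inicial, A2_inicial, x0_inicial, dx_inicial, x_values, y_values)
            sum_dx += derivada_dx(A1_inicial, A2_inicial, x0_inicial, dx_inicial, x_values, y_values)
            sum_x0 += derivada_x0(A1_inicial, A2_inicial, x0_inicial, dx_inicial, x_values, y_values)
        A1_inicial -= alpha * sum_A1
        A2_inicial -= alpha * sum_A2
        x0_inicial -= alpha * sum_x0
        dx_inicial -= alpha * sum_dx
        current_iteration += 1  # increment inside the loop
    return A1_inicial, A2_inicial, x0_inicial, dx_inicial  # return only after all iterations finish

This is exactly the same logic as your inline version that works; the only difference is that the loop is allowed to run all 100 iterations before the function returns.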