I am trying to solve a linear regression problem using neural networks but my loss is coming to the power of 10 and is not reducing for training. I am using the house price prediction dataset(https://www.kaggle.com/c/house-prices-advanced-regression-techniques) and can't figure whats going wrong. Please help someone
X_train, X_test, y_train, y_test = train_test_split(df2, y, test_size=0.2)
X_tr=np.array(X_train)
y_tr=np.array(y_train)
X_te=np.array(X_test)
y_te=np.array(y_test)
def get_weights(shape,name): #(no of neurons*no of columns)
s=tf.truncated_normal(shape)
w=tf.Variable(s,name=name)
return w
def get_bias(number,name):
s=tf.truncated_normal([number])
b=tf.Variable(s,name=name)
return b
x=tf.placeholder(tf.float32,name="input")
w=get_weights([34,100],'layer1')
b=get_bias(100,'bias1')
op=tf.matmul(x,w)+b
a=tf.nn.relu(op)
fl=get_weights([100,1],'output')
b2=get_bias(1,'bias2')
op2=tf.matmul(a,fl)+b2
y=tf.placeholder(tf.float32,name='target')
loss=tf.losses.mean_squared_error(y,op2)
optimizer = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
with tf.Session() as sess:
for i in range(0,1000):
sess.run(tf.global_variables_initializer())
_,l=sess.run([optimizer,loss],feed_dict={x:X_tr,y:y_tr})
print(l)
You are simple randomly initializing the variables in every training step. Just call sess.run(tf.global_variables_initializer())
only once before the loop.