Tags: tensorflow, neural-network, backpropagation

Backpropagation doesn't work in TensorFlow


I am new to TensorFlow. Recently I have been trying to fit the nonlinear function "y = 1 + sin(x * pi/4)" with a two-layer neural network model. The code of the program is as follows:

    #!/usr/bin/python

    import tensorflow as tf
    import numpy as np
    import math
    import matplotlib.pyplot as plt

    def check_mode():
        # Sample the target function y = 1 + sin(x * pi/4) on [-2, 2]
        x_data = np.linspace(-2, 2, 100)
        y_data = [1 + math.sin(x * math.pi / 4) for x in x_data]
        w_1 = tf.Variable(tf.random_uniform([1,2],0,0.5))
        b_1 = tf.Variable(tf.random_uniform([1,2],0,0.5))
        w_2 = tf.Variable(tf.random_uniform([2,1],0,0.5))
        b_2 = tf.Variable(tf.random_uniform([1,1],0,0.5))
        saver = tf.train.Saver()
        with tf.Session() as sess:
            # Restore the weights saved by first_function()
            saver.restore(sess, "mode.ckpt")
            print("lay1: ",sess.run(w_1),sess.run(b_1))
            print("lay2: ",sess.run(w_2),sess.run(b_2))
            a = []
            for x_i in x_data:
                # Forward pass through the restored network (note: this
                # adds new ops to the graph on every iteration)
                w_plus_b = tf.matmul([[x_i]], w_1) + b_1
                a_1 = sigma(w_plus_b)
                a_2 = tf.matmul(a_1, w_2) + b_2
                a.append(sess.run(a_2[0][0]))
        print(a)
        draw_point(a,x_data,y_data)
        return
    def draw_point(a, x_data, y_data):
        # Plot the target points ('o-') and the network's output ('k-')
        fig, ax = plt.subplots()
        plt.plot(x_data, y_data, 'o-')
        plt.plot(x_data, a, 'k-')
        plt.show()


    def sigma(x):
        # Element-wise logistic sigmoid: 1 / (1 + exp(-x))
        return tf.div(tf.constant(1.0), tf.add(tf.constant(1.0), tf.exp(tf.negative(x))))

    def first_function():
        x_data = np.linspace(-2, 2, 100)
        y_data = [1 + math.sin(x * math.pi / 4) for x in x_data]

        # Placeholders for a single (x, y) training sample
        x_i = tf.placeholder(tf.float32, [1, 1])
        y_data_i = tf.placeholder(tf.float32, [1, 1])

        # Layer 1: 1 input -> 2 hidden units
        w_1 = tf.Variable(tf.random_uniform([1, 2], 0, 0.5))
        b_1 = tf.Variable(tf.random_uniform([1, 2], 0, 0.5))

        # Layer 2: 2 hidden units -> 1 output
        w_2 = tf.Variable(tf.random_uniform([2, 1], 0, 0.5))
        b_2 = tf.Variable(tf.random_uniform([1, 1], 0, 0.5))

        # Forward pass: sigmoid hidden layer, then linear output layer
        z_1 = tf.add(tf.matmul(x_i, w_1), b_1)
        a_1 = sigma(z_1)
        a_2 = tf.add(tf.matmul(a_1, w_2), b_2)

        # Squared-error loss for a single sample
        diff = tf.subtract(a_2, y_data_i)
        loss = tf.multiply(diff, diff)

        optimizer = tf.train.GradientDescentOptimizer(0.1)
        train = optimizer.minimize(loss)

        init = tf.global_variables_initializer()

        sess = tf.Session()
        sess.run(init)
        saver = tf.train.Saver()
        # A single pass over the data: one gradient step per sample
        for step in range(100):
            sess.run(train, feed_dict={x_i: [[x_data[step]]], y_data_i: [[y_data[step]]]})

        print("lay1: ",sess.run(w_1),sess.run(b_1))
        print("lay2: ",sess.run(w_2),sess.run(b_2))
        saver.save(sess,"mode.ckpt")
        return

    def main():
        #first_function()
        check_mode()
        return

    if __name__ == '__main__':
        main()

I am not sure whether, or how, TensorFlow completes backpropagation automatically. I really do not want to implement backpropagation myself. Is there something wrong with my code? Thank you very much for your help!
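
For reference, one minimal way to look at the gradients that minimize() would apply is tf.gradients. The sketch below is illustrative only: it assumes it runs inside first_function after sess.run(init), reusing the loss, variables, and placeholders defined there:

    # Illustrative sketch: tf.gradients returns the symbolic gradient of
    # loss with respect to each listed variable; these are the values a
    # gradient-descent step would use.
    grads = tf.gradients(loss, [w_1, b_1, w_2, b_2])
    print(sess.run(grads, feed_dict={x_i: [[x_data[0]]],
                                     y_data_i: [[y_data[0]]]}))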


Solution

  • It certainly looks like TensorFlow completes the backpropagation for you, but it doesn't look like you are training the network very much. Specifically, your training loop goes through each data point only once. Try many, many passes through the data, as sketched below.
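
    A minimal sketch of what that could look like is to wrap the existing single pass in first_function in an outer epoch loop (the epoch count of 1000 here is an arbitrary illustration, not a tuned value):

        # Sketch: wrap the single pass over the data in an epoch loop so
        # every sample is seen many times. 1000 epochs is arbitrary.
        for epoch in range(1000):
            for i in range(len(x_data)):
                sess.run(train, feed_dict={x_i: [[x_data[i]]],
                                           y_data_i: [[y_data[i]]]})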