
Returning multiple values in the input function for `tf.py_func`


I'm trying to set custom gradients using tf.py_func and tf.RegisterGradient. Specifically, I'm trying to take the gradient of an eigenvalue w.r.t. its Laplacian. I got the basic version working, where my Python function returns one value, namely the eigenvalue. But for the gradient to work, I also need to return the eigenvector. Trying to return two values results in the error pyfunc_1 returns 2 values, but expects to see 1 values. How can I solve this error?

Here's the full code of my custom gradient.

import numpy as np
import networkx as nx
from scipy import sparse
import tensorflow as tf
from tensorflow.python.framework import ops

# python function to calculate the second eigenvalue
def calc_second_eigval(X):
    G = nx.from_numpy_matrix(X)
    degree_dict = nx.degree(G)
    degree_list = [x[1] for x in degree_dict]
    lap_matrix = sparse.diags(degree_list, 0)-nx.adjacency_matrix(G)
    eigval, eigvec = sparse.linalg.eigsh(lap_matrix, 2, sigma=0, which='LM')
    return float(eigval[0]), eigvec[:,0]

# define a custom py_func which also takes a grad op as argument:
def py_func(func, inp, Tout, stateful=True, name=None, grad=None):

    # Need to generate a unique name to avoid duplicates:
    rnd_name = 'PyFuncGrad' + str(np.random.randint(0, 1E+8))

    tf.RegisterGradient(rnd_name)(grad)  # see _MySquareGrad for grad example
    g = tf.get_default_graph()
    with g.gradient_override_map({"PyFunc": rnd_name}):
        return tf.py_func(func, inp, Tout, stateful=stateful, name=name)

# define custom second_eigval function for tensorflow
def custom_second_eigval(x, name=None):

    with ops.op_scope([x], name, "SecondEigValGrad") as name:
        eigval = py_func(calc_second_eigval,
                        [x],
                        [tf.float64],
                        name=name,
                        grad=_SecondEigValGrad)  # <-- here's the call to the gradient
        return eigval[0]

# actual gradient:
def _SecondEigValGrad(op, grad):
    # TODO: this should involve the eigenvectors
    x = op.inputs[0]    
    return grad * 20 * x 

X = tf.Variable(tf.random_normal([200,200],dtype=tf.float64))

second_eigval = custom_second_eigval(X)
optimizer = tf.train.AdamOptimizer(0.01)
update = tf.contrib.slim.learning.create_train_op(second_eigval, optimizer,summarize_gradients=True)
with tf.Session() as sess:
    tf.initialize_all_variables().run()
    print(update.eval())

Solution

  • Your Tout must be (tf.float64, tf.float64) instead of [tf.float64], since calc_second_eigval returns two values:

    eigval = py_func(calc_second_eigval,
                     [x],
                     (tf.float64, tf.float64),
                     name=name,
                     grad=_SecondEigValGrad)
    

    Here is a working demo; a sketch of how the gradient function can use the second output follows after it.

    import tensorflow as tf

    # Python function that returns two values
    def dummy(x):
        return [x, x]

    print(dummy([1.0, 2.0]))

    # Tout is a tuple with one dtype per returned value,
    # so tf_fun is a list of two tensors
    tf_fun = tf.py_func(dummy, [[1.0, 2.0]], (tf.float32, tf.float32))

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        print(sess.run(tf_fun))
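
    Applied to the original code, one more change is needed: once the PyFunc op has two outputs, TensorFlow passes the registered gradient function one incoming gradient per output, so _SecondEigValGrad takes an extra argument, and the eigenvector from the forward pass becomes available as op.outputs[1]. The snippet below is only a sketch of that wiring, assuming only the eigenvalue feeds the loss; the v v^T expression is a placeholder, not the exact gradient of the second Laplacian eigenvalue w.r.t. X.

    # Sketch only: shows how the gradient function can read the second
    # output (the eigenvector) of the PyFunc op. The formula is a
    # placeholder, not the exact gradient w.r.t. the input matrix.
    def _SecondEigValGrad(op, grad_eigval, grad_eigvec):
        # One incoming gradient per output; grad_eigvec is unused because
        # only the eigenvalue is part of the loss.
        eigvec = op.outputs[1]                      # eigenvector from the forward pass
        v = tf.reshape(eigvec, [-1, 1])
        outer = tf.matmul(v, v, transpose_b=True)   # v v^T (placeholder expression)
        return grad_eigval * outer                  # gradient w.r.t. the single input x

    With this Tout, py_func returns a list of two tensors, so the existing return eigval[0] in custom_second_eigval still picks out the eigenvalue tensor.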