Tags: tensorflow, autograd, autodiff

Computing directional gradient w.r.t. the weights in TensorFlow


I want to compute the gradient w.r.t. the weights of a TensorFlow model, but only along a single direction:

import tensorflow as tf

model = tf.keras.Sequential([
  tf.keras.layers.Dense(1, activation='sigmoid')
])
model.compile(optimizer='adam', loss=tf.keras.losses.BinaryCrossentropy(from_logits=False))

features = tf.random.normal((1000,10))
labels = tf.random.normal((1000,))

model.fit(features, labels, batch_size=32, epochs=1)

x_star = model.layers[0].weights #the layer has kernel and bias
v = tf.random.normal((10,1)) #direction of the gradient

def directional_loss(model, x, y, t):
    model.layers[0].set_weights([x_star[0] + t*v, x_star[1]])
    y_ = model(x)
    return model.loss(y_true=y, y_pred=y_)

def directional_grad(model, inputs, targets, t):
    with tf.GradientTape() as tape:
        loss_value = directional_loss(model, inputs, targets, t)
    return loss_value, tape.gradient(loss_value, t)

t=0.
loss_value, grads = directional_grad(model, features, labels, t)

But it returns the following error:

Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "<stdin>", line 4, in directional_grad
  File "C:\Users\pierr\AppData\Local\Programs\Python\Python38\lib\site-packages\tensorflow\python\eager\backprop.py", line 1070, in gradient
    if not backprop_util.IsTrainable(t):
  File "C:\Users\pierr\AppData\Local\Programs\Python\Python38\lib\site-packages\tensorflow\python\eager\backprop_util.py", line 58, in IsTrainable
    dtype = dtypes.as_dtype(dtype)
  File "C:\Users\pierr\AppData\Local\Programs\Python\Python38\lib\site-packages\tensorflow\python\framework\dtypes.py", line 725, in as_dtype
    raise TypeError(f"Cannot convert value {type_value!r} to a TensorFlow DType.")
TypeError: Cannot convert value 0.0 to a TensorFlow DType.

I think it is because the operation model.layers[0].set_weights is not "differentiable".

How can I fix it? Alternatively, can I compute the output of a layer in TensorFlow while specifying its weights directly, something like y = layer(x, weights=w)?
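
For what it's worth, here is a small sketch (reusing the model, features, labels, x_star and v defined above) of what I expect to happen: the dtype error seems to come from passing the plain Python float t to tape.gradient, and with t wrapped in a tf.Variable the error disappears, but the gradient then comes back as None, which is consistent with set_weights not being differentiable:

t = tf.Variable(0.)
with tf.GradientTape() as tape:
    loss_value = directional_loss(model, features, labels, t)
grads = tape.gradient(loss_value, t)
print(grads)  # None: the assignment performed by set_weights cuts the gradient link to t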


Solution

  • In the end, I have found no solution other than re-creating a layer object by subclassing tf.keras.layers.Layer and re-defining its build and call methods, which gives, for a dense layer for example:

    class CustomLayer(tf.keras.layers.Layer):
      def __init__(self, x_star, direction_vectors, activation=None):
        super(CustomLayer, self).__init__()
        self.x_star = x_star  # x_star[0] is the kernel matrix and x_star[1] is the bias
        # one direction per scalar coordinate, reshaped to the kernel's shape
        self.direction_vectors = tf.reshape(direction_vectors, [direction_vectors.shape[0], x_star[0].shape[0], x_star[0].shape[1]])
        self.activation = activation

      def build(self, input_shape):
        # the only trainable weights are the scalar coordinates along each direction
        self.kernel = self.add_weight("kernel", shape=[self.direction_vectors.shape[0],])

      def call(self, inputs):
        # effective kernel: x_star[0] + sum_i kernel[i] * direction_vectors[i]
        outputs = tf.matmul(inputs, self.x_star[0] + tf.tensordot(self.kernel, self.direction_vectors, axes=[[0],[0]])) + self.x_star[1]
        if self.activation is not None:
          outputs = self.activation(outputs)
        return outputs
    

    as explained in https://github.com/Bras-P/gibbs-measures-with-singular-hessian/blob/main/T4-expansion.ipynb
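
    A minimal usage sketch (my own illustration, reusing features, labels, x_star and v from the question; the linked notebook may set things up differently): the directional derivative of the loss at x_star along v is then just the ordinary gradient w.r.t. the layer's scalar coordinate, taken at t = 0.

    layer = CustomLayer(x_star, tf.reshape(v, (1, -1)), activation=tf.sigmoid)
    _ = layer(features)          # first call builds the layer and creates the scalar weight
    layer.kernel.assign([0.])    # evaluate at t = 0, i.e. exactly at x_star

    loss_fn = tf.keras.losses.BinaryCrossentropy(from_logits=False)
    with tf.GradientTape() as tape:
        loss_value = loss_fn(y_true=labels, y_pred=layer(features))
    # d/dt L(x_star + t*v) at t = 0, i.e. the directional derivative <grad L(x_star), v>
    grads = tape.gradient(loss_value, layer.kernel)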