Tags: tensorflow, gradient-descent

Why does model.get_weights() return an empty list? Is it a TensorFlow bug?


I am trying to implement MAML. I ran into a problem, so I wrote a simplified version that shows my confusion. If you update the weights with 'optimizer.apply_gradients', you can get the model weights with 'model.get_weights()'. But if you apply the gradient update yourself, 'model.get_weights()' just returns an empty list.

import tensorflow as tf
from tensorflow.keras import layers, activations, losses, Model, optimizers, models
import numpy as np


class MAMLmodel(Model):
    def __init__(self):
        super().__init__()

        self.Dense1 = layers.Dense(2, input_shape=(3, ))
        self.Dense2 = layers.Dense(1)

    def forward(self, inputs):
        x = self.Dense1(inputs)
        x = self.Dense2(x)

        return x

def compute_loss(y_true, y_pred):
    return losses.mean_squared_error(y_true, y_pred)

x1 = [[[1], [1], [1]],
      [[1], [1], [1]],
      [[1], [1], [1]]]

y1 = [[[0], [0], [0]],
      [[0], [0], [0]],
      [[0], [0], [0]]]
x1 = tf.convert_to_tensor(x1)
y1 = tf.convert_to_tensor(y1) 

inner_train_step = 1
batch_size = 3
lr_inner = 0.001

model = MAMLmodel()
inner_optimizer = optimizers.Adam()

for i in range(batch_size):
    # If inner_train_step is 2 or greater, the gradients come back as an empty list.
    for inner_step in range(inner_train_step):
        with tf.GradientTape() as support_tape:
            support_tape.watch(model.trainable_variables)
            y_pred = model.forward(x1[i])
            support_loss = compute_loss(y1[i], y_pred)

        gradients = support_tape.gradient(support_loss, model.trainable_variables)
        # inner_optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        k = 0
        for j in range(len(model.layers)):
            model.layers[j].kernel = tf.subtract(model.layers[j].kernel, tf.multiply(lr_inner, gradients[k]))
            model.layers[j].bias = tf.subtract(model.layers[j].bias, tf.multiply(lr_inner, gradients[k + 1]))
            k += 2

    # If you update the weights with 'optimizer.apply_gradients', this prints the weights.
    # But if you apply the update manually as above, it just prints an empty list.
    print(model.get_weights())

I can't find the problem in my code, so I think it is a TensorFlow bug. Please help me! I can't sleep because of this error.


Solution

  • It is not a TensorFlow bug :) You are replacing the Variables of your model with plain Tensors, so in the second iteration, when you call .gradient(support_loss, model.trainable_variables), your model no longer has any trainable variables. Modify your code like so, using the methods that manipulate Variables in place:

    import tensorflow as tf
    from tensorflow.keras import layers, activations, losses, Model, optimizers, models
    import numpy as np
    
    
    class MAMLmodel(Model):
        def __init__(self):
            super().__init__()
    
            self.Dense1 = layers.Dense(2, input_shape=(3, ))
            self.Dense2 = layers.Dense(1)
    
        def forward(self, inputs):
            x = self.Dense1(inputs)
            x = self.Dense2(x)
    
            return x
    
    def compute_loss(y_true, y_pred):
        return losses.mean_squared_error(y_true, y_pred)
    
    x1 = [[[1], [1], [1]],
          [[1], [1], [1]],
          [[1], [1], [1]]]
    
    y1 = [[[0], [0], [0]],
          [[0], [0], [0]],
          [[0], [0], [0]]]
    x1 = tf.convert_to_tensor(x1)
    y1 = tf.convert_to_tensor(y1) 
    
    inner_train_step = 2
    batch_size = 3
    lr_inner = 0.001
    
    model = MAMLmodel()
    inner_optimizer = optimizers.Adam()
    
    for i in range(batch_size):
        # With the in-place updates below, the gradients stay available even when inner_train_step is 2 or greater.
        for inner_step in range(inner_train_step):
            with tf.GradientTape() as support_tape:
                support_tape.watch(model.trainable_variables)
                y_pred = model.forward(x1[i])
                support_loss = compute_loss(y1[i], y_pred)
    
            gradients = support_tape.gradient(support_loss, model.trainable_variables)
            # inner_optimizer.apply_gradients(zip(gradients, model.trainable_variables))
            print(f'Number of computed gradients: {len(gradients)}')
            k = 0
            for j in range(len(model.layers)):
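            # assign_sub updates each tf.Variable in place, so the layers keep
            # tracking their weights and model.trainable_variables stays populated.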
                model.layers[j].kernel.assign_sub(tf.multiply(lr_inner, gradients[k]))
                model.layers[j].bias.assign_sub(tf.multiply(lr_inner, gradients[k + 1]))
                k += 2
    
        # Because the updates above go through assign_sub, the Variables are
        # preserved and get_weights() now prints the updated weights.
        print(f'Get weights: {model.get_weights()}')
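
    To make the mechanism concrete, here is a minimal standalone sketch of the difference between overwriting an attribute and updating a Variable in place. The exact counts reported below can vary across TensorFlow versions, but the pattern is the one described above:

    import tensorflow as tf

    layer = tf.keras.layers.Dense(2)
    layer(tf.ones((1, 3)))  # build the layer so kernel and bias exist

    print(isinstance(layer.kernel, tf.Variable))  # True
    print(len(layer.trainable_variables))         # 2: kernel and bias

    # Overwriting the attribute with the result of tf.subtract replaces the
    # tf.Variable with a plain Tensor, so the layer loses track of it:
    layer.kernel = tf.subtract(layer.kernel, 0.001)
    print(isinstance(layer.kernel, tf.Variable))  # False
    print(len(layer.trainable_variables))         # the kernel has dropped out

    # assign_sub mutates the Variable in place, so tracking is preserved:
    layer.bias.assign_sub(tf.multiply(0.001, tf.ones_like(layer.bias)))
    print(isinstance(layer.bias, tf.Variable))    # still True
    print(len(layer.trainable_variables))         # unchanged

    This is also why the commented-out inner_optimizer.apply_gradients(zip(gradients, model.trainable_variables)) line works: the optimizer applies its updates to the Variables in place.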