Tags: python, machine-learning, keras

Is there any difference between this Keras script and my Python class?


I am wondering whether there is any difference between this Keras script:

from tensorflow import keras
from tensorflow.keras import layers

latent_dim = 2
latent_inputs = keras.Input(shape=(latent_dim,))
x = layers.Dense(7 * 7 * 64, activation="relu")(latent_inputs)
x = layers.Reshape((7, 7, 64))(x)
x = layers.Conv2DTranspose(64, 3, activation="relu", strides=2, padding="same")(x)
x = layers.Conv2DTranspose(32, 3, activation="relu", strides=2, padding="same")(x)
decoder_outputs = layers.Conv2DTranspose(1, 3, activation="sigmoid", padding="same")(x)
decoder = keras.Model(latent_inputs, decoder_outputs, name="decoder")
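
For reference, printing the model summary shows the shape each layer produces; the values below follow from the two strides-2 transposed convolutions that upsample 7×7 to 28×28:

decoder.summary()
# Dense           -> (None, 3136)
# Reshape         -> (None, 7, 7, 64)
# Conv2DTranspose -> (None, 14, 14, 64)
# Conv2DTranspose -> (None, 28, 28, 32)
# Conv2DTranspose -> (None, 28, 28, 1)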

and my object-oriented implementation:

class Decoder(keras.Model):
    def __init__(self, latent_dimension):
        super(Decoder, self).__init__()
        self.latent_dim = latent_dimension
        self.dense1 = layers.Dense(7 * 7 * 64, activation="relu")
        self.reshape = layers.Reshape((7, 7, 64))
        self.deconv1 = layers.Conv2DTranspose(64, 3, 2, "same", activation="relu")
        self.deconv2 = layers.Conv2DTranspose(32, 3, 2, "same", activation="relu")

    def call(self, inputs, training=None, mask=None):
        x = self.dense1(inputs)
        x = self.reshape(x)
        x = self.deconv1(x)
        decoder_outputs = self.deconv2(x)
        return decoder_outputs

which I invoke as follows:

if __name__ == '__main__':
    latent_dim = 2
    decoder = Decoder(latent_dim)
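
A quick way to compare the two versions is to build the subclassed model by calling it on a dummy latent batch and inspecting the output shape (the tf.zeros tensor here is only illustrative):

import tensorflow as tf

sample = tf.zeros((1, latent_dim))  # dummy latent vector, batch size 1
print(decoder(sample).shape)        # (1, 28, 28, 32) for the class above
# The functional decoder instead ends in (None, 28, 28, 1), which points to the difference.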

Solution

  • No, they are not equivalent. I forgot to add the third (de)convolutional layer in the class:

    self.deconv3 = layers.Conv2DTranspose(1, 3, activation="sigmoid", padding="same")
    

    which converts the output from shape (None, 28, 28, 32) to (None, 28, 28, 1); the fully corrected class is sketched below.
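
The new layer also has to be applied in call(); a minimal sketch of the corrected class, assuming the same keras/layers imports as above:

class Decoder(keras.Model):
    def __init__(self, latent_dimension):
        super().__init__()
        self.latent_dim = latent_dimension
        self.dense1 = layers.Dense(7 * 7 * 64, activation="relu")
        self.reshape = layers.Reshape((7, 7, 64))
        self.deconv1 = layers.Conv2DTranspose(64, 3, 2, "same", activation="relu")
        self.deconv2 = layers.Conv2DTranspose(32, 3, 2, "same", activation="relu")
        # The previously missing layer: maps (None, 28, 28, 32) to (None, 28, 28, 1)
        self.deconv3 = layers.Conv2DTranspose(1, 3, activation="sigmoid", padding="same")

    def call(self, inputs, training=None, mask=None):
        x = self.dense1(inputs)
        x = self.reshape(x)
        x = self.deconv1(x)
        x = self.deconv2(x)
        return self.deconv3(x)

With this layer applied, the subclassed decoder produces the same (None, 28, 28, 1) output as the functional model.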