I wrote the custom layer below, and when I try to add a Dense layer after it, Keras gets the input shape wrong: the Dense layer expects shape[-1] of the tensor from before my layer.
from keras import backend as K
from keras.engine.topology import Layer
from keras.layers import Conv2D, Dense, Input


class SMSO(Layer):
    def __init__(self, feature_dim=256, **kwargs):
        self.feature_dim = feature_dim
        super(SMSO, self).__init__(**kwargs)

    def build(self, input_shape):
        # Learnable per-feature scale and offset applied to the pooled output
        self.scale = self.add_weight('scale',
                                     shape=(1, self.feature_dim),
                                     initializer='ones',
                                     trainable=True)
        self.offset = self.add_weight('offset',
                                      shape=(1, self.feature_dim),
                                      initializer='zeros',
                                      trainable=True)
        super(SMSO, self).build(input_shape)

    def call(self, x):
        # Center spatially, project with a 1x1 convolution, then reduce the
        # spatial axes to an L2 norm per feature:
        # (batch, h, w, c) -> (batch, feature_dim).
        # Caveat: creating a Conv2D inside call builds fresh, untracked
        # weights on every invocation.
        x = x - K.mean(x, axis=(1, 2), keepdims=True)
        x = K.square(Conv2D(self.feature_dim, 1)(x))
        x = K.sqrt(K.sum(x, axis=(1, 2)))
        return self.scale * x + self.offset


x = Input(shape=(10, 10, 32))
l1 = SMSO(16)(x)
print(l1.shape)
l2 = Dense(10)(l1)  # this line fails
The code above reproduces the error: l1.shape gives (?, 16) as expected, but the Dense call on the last line fails. The reason is that Keras does not infer a layer's output shape from the tensor itself; it propagates shapes through each layer's compute_output_shape, and the base Layer implementation simply returns the input shape unchanged. Dense therefore tries to build against shape[-1] = 32, the channel dimension of the tensor before the layer, while the tensor it actually receives has shape (?, 16).
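The mismatch can be made visible directly (a quick check, assuming Keras 2 on the TensorFlow backend, where each symbolic output carries an internal _keras_shape attribute filled in from compute_output_shape):

print(l1.shape)         # (?, 16) -- the real TensorFlow shape
print(l1._keras_shape)  # (None, 10, 10, 32) -- what Keras propagates to Dense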
Adding a compute_output_shape method to the layer solves the problem:
    def compute_output_shape(self, input_shape):
        return (input_shape[0], self.feature_dim)
Any custom layer that changes the shape of its input needs to define compute_output_shape so Keras can propagate the correct shape to the layers that follow.
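With that method added to the SMSO class above, the inferred and actual shapes agree and the same snippet runs through (again assuming Keras 2 on the TensorFlow backend):

x = Input(shape=(10, 10, 32))
l1 = SMSO(16)(x)        # SMSO now defines compute_output_shape
print(l1._keras_shape)  # (None, 16) -- now matches the real tensor shape
l2 = Dense(10)(l1)      # builds against input_dim 16 and no longer fails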