I wrote my own Keras/TensorFlow layer. Passing images into it works fine, but using it in combination with other layers raises the error below; apparently the output shape of my custom layer is wrong, or ends up as some kind of NoneType.
In short, the custom layer transforms the image from colorspace A to B and then builds a histogram from some of the channels. It is a preprocessing layer for the discriminator of a GAN, and therefore has to be part of the generator's backpropagation model.
import tensorflow as tf
from keras import backend as K
from keras.layers import Layer

class Identity_Loss(Layer):

    def __init__(self, output_dim, **kwargs):
        self.output_dim = output_dim
        super(Identity_Loss, self).__init__(**kwargs)

    def build(self, input_shape):
        super(Identity_Loss, self).build(input_shape)  # Be sure to call this at the end

    def call(self, x):
        assert isinstance(x, list)
        input_1, input_2 = x

        # Transform BGR to RGB, rescale from [-1, 1] to [0, 255], then convert to HSV
        channels = tf.unstack(input_1, axis=-1)
        RGB = tf.stack([channels[2], channels[1], channels[0]], axis=-1)
        RGB = tf.cast(tf.multiply(tf.truediv(tf.add(RGB, 1.0), 2.0), 255.0), dtype=tf.int32)
        RGB = tf.cast(RGB, dtype=tf.float32)
        HSV = tf.image.rgb_to_hsv(RGB, name=None)

        SV = HSV[:, :, :, 1:]

        # Make the mask binary and multiply it with the image
        y = tf.math.greater(input_2, 0)
        y = tf.cast(y, tf.float32, name=None)
        HSV_mask = tf.math.multiply(HSV, y)

        # Count color occurrences
        shape = tf.shape(HSV_mask)
        length = shape[1] * shape[2]

        # Quantize the hue channel to integer bins
        Hue = HSV_mask[:, :, :, :1]
        Hue = tf.cast(tf.multiply(Hue, 255.0), dtype=tf.int32)
        Hue2 = tf.reshape(Hue, [length])

        # Interleave a filler containing every index once, so that
        # unique_with_counts always returns the same number of bins
        filler = tf.range(0, length, 1, dtype=tf.int32)
        filler = tf.reshape(filler, [length])
        Hue3 = tf.stack([Hue2, filler], axis=-1)
        Hue3 = tf.reshape(Hue3, [2 * length])

        # Count hue occurrences and normalize by the largest bin
        y1, idx1, count1 = tf.unique_with_counts(Hue3)
        maximum = tf.cast(tf.math.reduce_max(count1[1:257]), dtype=tf.int32)
        diff = tf.reshape(count1[1:257], (16, 16))
        diff = tf.expand_dims(diff, axis=-1)
        diff = tf.expand_dims(diff, axis=0)
        diff = tf.truediv(diff, maximum)
        diff = tf.cast(diff, dtype=tf.float32)

        return [SV, HSV_mask, diff]

    def compute_output_shape(self, input_shape):
        assert isinstance(input_shape, list)
        return [[None, None, 2], [None, None, 3], [None, None, 1]]
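For reference, here is roughly how I wire the layer into the graph (a minimal sketch, simplified from my actual code; the small Conv2D network is just a stand-in for my discriminator, and the input shapes are placeholders):

from keras.layers import Input, Conv2D
from keras.models import Model

# a small stand-in CNN; the real discriminator is bigger
inp = Input(shape=(None, None, 3))
out = Conv2D(16, (3, 3), padding='same')(inp)
model = Model(inp, out)

input_A = Input(shape=(None, None, 3))  # BGR image scaled to [-1, 1]
input_B = Input(shape=(None, None, 3))  # mask

c, d, e = Identity_Loss(output_dim=3)([input_A, input_B])

dd = model(d)  # <- this call raises the TypeError below

bb = Model([input_A, input_B], [c, d, dd])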
This is the corresponding error message when passing the custom layer's output into another CNN layer, for example:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-89-3caaa8c77e0c> in <module>()
5 c,d,e=mod([d,input_B])
6
----> 7 dd=model(d)
8
9 bb = Model([input_A,input_B],[c,d,dd])
3 frames
/usr/local/lib/python3.6/dist-packages/keras/engine/base_layer.py in __call__(self, inputs, **kwargs)
487 # Actually call the layer,
488 # collecting output(s), mask(s), and shape(s).
--> 489 output = self.call(inputs, **kwargs)
490 output_mask = self.compute_mask(inputs, previous_mask)
491
/usr/local/lib/python3.6/dist-packages/keras/engine/network.py in call(self, inputs, mask)
581 return self._output_tensor_cache[cache_key]
582 else:
--> 583 output_tensors, _, _ = self.run_internal_graph(inputs, masks)
584 return output_tensors
585
/usr/local/lib/python3.6/dist-packages/keras/engine/network.py in run_internal_graph(self, inputs, masks)
796 input_shapes = unpack_singleton(
797 [x._keras_shape for x in computed_tensors])
--> 798 shapes = to_list(layer.compute_output_shape(input_shapes))
799 uses_learning_phase = any(
800 [x._uses_learning_phase for x in computed_tensors])
/usr/local/lib/python3.6/dist-packages/keras/layers/convolutional.py in compute_output_shape(self, input_shape)
191 def compute_output_shape(self, input_shape):
192 if self.data_format == 'channels_last':
--> 193 space = input_shape[1:-1]
194 elif self.data_format == 'channels_first':
195 space = input_shape[2:]
TypeError: 'NoneType' object is not subscriptable
Short answer: add the batch dimension to the output shapes returned by the layer's compute_output_shape method.
Long answer: Keras models always operate on a batch of input samples, so all the input shape and output shape values in a Keras layer include the batch dimension. You need to account for that as well when computing the output shape of the layer:
def compute_output_shape(self, input_shape):
    assert isinstance(input_shape, list)
    # input_shape is a list of shape tuples (one per input),
    # so the batch size is input_shape[0][0]
    batch_size = input_shape[0][0]
    return [
        (batch_size, None, None, 2),
        (batch_size, None, None, 3),
        (batch_size, None, None, 1),
    ]
Since the batch size does not change from layer to layer, you just need to prepend it to the returned shapes as above. Note that because your layer takes a list of inputs, input_shape here is a list of shape tuples, so the batch size is input_shape[0][0] rather than input_shape[0].
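With this change, the downstream call from your question should build without the error (a sketch, reusing the simplified wiring from the question):

sv, hsv_mask, hist = Identity_Loss(output_dim=3)([input_A, input_B])
dd = model(hsv_mask)  # no longer raises "'NoneType' object is not subscriptable"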