Search code examples
python · tensorflow · keras · tf.keras · keras-layer

Keras will not allow building a model that has custom layers inside


This is my code for a Resnet:

# NOTE(review): this is the asker's original code, kept verbatim; the comments
# below flag the defects that lead to the reported OperatorNotAllowedInGraphError.
class Resnet50(keras.Model):
    def __init__(self):
        # NOTE(review): no layers are created here, so the subclassed model
        # owns no tracked weights; every layer below is created per call.
        super().__init__()

    def RBB(self, input_shape, K):
        # Residual building block built as a standalone keras.Model.
        # NOTE(review): constructing keras.Input/keras.Model inside a method
        # that runs during call() creates fresh, untracked layers each time.
        inputs = keras.Input(input_shape)
        X = layers.Conv2D(K, kernel_size=(1, 1), activation="relu")(inputs)
        X = layers.Conv2D(K, kernel_size=(1, 1), activation="relu")(X)
        X = layers.Conv2D(K, kernel_size=(1, 1), activation="relu")(X)
        # NOTE(review): raw tensor add outside a layer; also requires K to
        # match the input channel count or the add fails — TODO confirm.
        skip_conn = inputs + X
        output = keras.activations.relu(skip_conn)
        model = keras.Model(inputs=inputs, outputs=output)
        return model

    def complete_network(self, input_shape, RBB=RBB):
        # NOTE(review): RBB=RBB captures the plain function as a default, so
        # the calls below (RBB(K=64)) omit the required `self`/`input_shape`
        # positional arguments and cannot succeed as written.
        inputs = keras.Input(input_shape)
        X = layers.Conv2D(64, kernel_size=(3, 3), activation="relu")(inputs)
        X = layers.Conv2D(64, kernel_size=(3, 3), activation="relu")(X)
        X = RBB(K=64)(X)
        X = RBB(K=64)(X)
        X = RBB(K=64)(X)
        X = RBB(K=128)(X)
        X = layers.MaxPooling2D(pool_size=(2, 2))(X)
        X = RBB(K=128)(X)
        X = RBB(K=128)(X)
        X = RBB(K=128)(X)
        X = RBB(K=256)(X)
        X = layers.MaxPooling2D(pool_size=(2, 2))(X)    
        X = RBB(K=256)(X)
        X = RBB(K=256)(X)
        X = RBB(K=256)(X)
        X = RBB(K=256)(X)
        X = RBB(K=256)(X)
        X = RBB(K=512)(X)
        X = layers.MaxPooling2D(pool_size=(2, 2))(X)  
        X = RBB(K=512)(X)
        X = RBB(K=512)(X)
        X = layers.AveragePooling2D(pool_size=(2, 2))(X)
        X = layers.Flatten()(X)
        X = layers.Dense(1056)(X)
        # NOTE(review): this Dense layer is never applied to X (missing (X)),
        # so `output` is a Layer object, not a tensor.
        output = layers.Dense(num_classes, activation="softmax")
        # NOTE(review): keras.Model cannot accept a Layer as `outputs`.
        model = keras.Model(inputs=inputs, outputs=output)
        return model

    def call(self, inputs):
        # NOTE(review): `inputs` here is a traced tf.Tensor, but
        # complete_network expects a *shape* and passes it to keras.Input,
        # which iterates it — the source of "iterating over tf.Tensor is not
        # allowed in Graph execution".
        x = self.complete_network(inputs)
        return x

net = Resnet50()
# NOTE(review): (224, 224) omits the channel (and batch) dimension; the
# Conv2D stack needs a 4D input shape such as (None, 224, 224, 3).
net.build(input_shape=(224,224))

net.summary()

And this is the error when I run the summary operation:

OperatorNotAllowedInGraphError            Traceback (most recent call last)

/usr/local/lib/python3.7/dist-packages/keras/engine/training.py in build(self, input_shape)
    439         try:
--> 440           self.call(x, **kwargs)
    441         except (tf.errors.InvalidArgumentError, TypeError) as e:

5 frames

OperatorNotAllowedInGraphError: iterating over `tf.Tensor` is not allowed in Graph execution. Use Eager execution or decorate this function with @tf.function.


During handling of the above exception, another exception occurred:

ValueError                                Traceback (most recent call last)

/usr/local/lib/python3.7/dist-packages/keras/engine/training.py in build(self, input_shape)
    440           self.call(x, **kwargs)
    441         except (tf.errors.InvalidArgumentError, TypeError) as e:
--> 442           raise ValueError('You cannot build your model by calling `build` '
    443                            'if your layers do not support float type inputs. '
    444                            'Instead, in order to instantiate and build your '

ValueError: You cannot build your model by calling `build` if your layers do not support float type inputs. Instead, in order to instantiate and build your model, call your model on real tensor data (of the correct dtype).

The actual error from `call` is: iterating over `tf.Tensor` is not allowed in Graph execution. Use Eager execution or decorate this function with @tf.function.

Any idea on how I can fix this code?


Solution

  • You are building many models inside a model, and creating a model inside a call, and creating tensors without keras layers. All of this is very weird and the problem could be anywhere.

    (Well... you can create models inside models, but the rest is too strange)

    Just build a single model with keras layers and that's it:

    def skipFunc(inputs):
        """Element-wise sum of the two tensors packed in *inputs* (a pair)."""
        first, second = inputs
        return first + second
    
    def RBB(inputTensor, K):
        """Residual building block: three 1x1 ReLU convs plus a skip connection.

        Returns the output tensor; layers are wired functionally, so this is
        meant to be called while assembling a keras.Model graph.
        """
        X = inputTensor
        # Three identical 1x1 convolutions with ReLU, applied in sequence.
        for _ in range(3):
            X = layers.Conv2D(K, kernel_size=(1, 1), activation="relu")(X)
        # Add the untouched input back in (skip connection), then final ReLU.
        merged = layers.Lambda(skipFunc)([X, inputTensor])
        return layers.Activation("relu")(merged)
    
    
    def complete_network(input_shape, RBB=RBB):
        """Build the full network with the functional API and return it.

        input_shape: per-sample shape fed to keras.Input; Conv2D requires a
            channel axis, e.g. (224, 224, 3) — confirm against the caller.
        RBB: residual-block builder, injected as a default for testability.

        Returns the assembled keras.Model.
        """
        inputs = keras.Input(input_shape)
        X = layers.Conv2D(64, kernel_size=(3, 3), activation="relu")(inputs)
        X = layers.Conv2D(64, kernel_size=(3, 3), activation="relu")(X)
        X = RBB(X, K=64)
        X = RBB(X, K=64)
        X = RBB(X, K=64)
        X = RBB(X, K=128)
        X = layers.MaxPooling2D(pool_size=(2, 2))(X)
        X = RBB(X, K=128)
        X = RBB(X, K=128)
        X = RBB(X, K=128)
        X = RBB(X, K=256)
        X = layers.MaxPooling2D(pool_size=(2, 2))(X)
        X = RBB(X, K=256)
        X = RBB(X, K=256)
        X = RBB(X, K=256)
        X = RBB(X, K=256)
        X = RBB(X, K=256)
        X = RBB(X, K=512)
        X = layers.MaxPooling2D(pool_size=(2, 2))(X)
        X = RBB(X, K=512)
        X = RBB(X, K=512)
        X = layers.AveragePooling2D(pool_size=(2, 2))(X)
        # BUG FIX: Flatten and both Dense layers were created but never
        # applied to X, so `output` was a Layer object (not a tensor) and
        # keras.Model(inputs=..., outputs=output) could not build the graph.
        X = layers.Flatten()(X)
        X = layers.Dense(1056)(X)
        output = layers.Dense(num_classes, activation="softmax")(X)
        model = keras.Model(inputs=inputs, outputs=output)
        return model
    
    # BUG FIX: Conv2D needs a channel axis in the sample shape; a bare
    # (224, 224) input shape makes the first Conv2D fail. Assumes
    # channels-last RGB input — confirm against the actual data.
    net = complete_network(input_shape=(224, 224, 3))
    net.summary()