Tags: tensorflow, keras, neural-network, functional-programming, artificial-intelligence

AssertionError in Functional Model with Multiple Inputs when moving from TF1 to TF2


Hi, I am trying to convert an old model from TF1 to TF2 and have been running into some issues. I have been using Google Colab to switch between TF1 and TF2; everything runs fine under TF1, but not under TF2. I have reproduced the problem with the short piece of code below.


from keras.layers import *
from keras import Model
from keras.backend import squeeze

def create_model():

    inputA = Input(shape=(1,))
    x = Dense(1)(inputA)
    x = Model(inputs=inputA, outputs=x)

    print(x.predict([0.1]))
    
    inputB = Input(shape=(1,))
    y = Dense(1)(inputB)
    y = Model(inputs=inputB, outputs=y)
    
    print(y.predict([0.1]))
    
    combined = concatenate(inputs=[x.output, y.output])
    model = Model(inputs=[x.input, y.input], outputs=combined)
    
    return model


if __name__ == "__main__":
    model = create_model()
    model.compile(loss='mse', optimizer='RMSprop')
    model.summary()

    print(model.predict([[0.1],[0.1]]))


Here is the error using TF2:

AssertionError: in user code:

    /usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py:1462 predict_function  *
        return step_function(self, iterator)
    /usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py:1452 step_function  **
        outputs = model.distribute_strategy.run(run_step, args=(data,))
    /usr/local/lib/python3.6/dist-packages/tensorflow/python/distribute/distribute_lib.py:1211 run
        return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
    /usr/local/lib/python3.6/dist-packages/tensorflow/python/distribute/distribute_lib.py:2585 call_for_each_replica
        return self._call_for_each_replica(fn, args, kwargs)
    /usr/local/lib/python3.6/dist-packages/tensorflow/python/distribute/distribute_lib.py:2945 _call_for_each_replica
        return fn(*args, **kwargs)
    /usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py:1445 run_step  **
        outputs = model.predict_step(data)
    /usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py:1418 predict_step
        return self(x, training=False)
    /usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/base_layer.py:985 __call__
        outputs = call_fn(inputs, *args, **kwargs)
    /usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/functional.py:386 call
        inputs, training=training, mask=mask)
    /usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/functional.py:517 _run_internal_graph
        assert x_id in tensor_dict, 'Could not compute output ' + str(x)

    AssertionError: Could not compute output Tensor("concatenate/concat:0", shape=(None, 2), dtype=float32)

Any assistance will be greatly appreciated.

Thanks, V_W


Solution

  • You can modify your code as shown below. This builds the combined model directly from the original Input tensors (inputA, inputB) and the Dense output tensors (x, y), rather than going through the intermediate Model objects' .input and .output attributes. A usage sketch follows the code.

    from tensorflow.keras.layers import *
    from tensorflow.keras import Model
    
    def create_model():
    
        inputA = Input(shape=(1,))
        x = Dense(1)(inputA)
        modelA = Model(inputs=inputA, outputs=x)
    
        print(modelA.predict([0.1]))
        
        inputB = Input(shape=(1,))
        y = Dense(1)(inputB)
        modelB = Model(inputs=inputB, outputs=y)
        
        print(modelB.predict([0.1]))
        
        concat = Concatenate()([x, y])
        model = Model(inputs=[inputA, inputB], outputs=concat)
        
        return model
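
    With this layout, the driver code from the question can call the combined model by passing one array per input. Here is a minimal sketch; the explicit NumPy arrays and their (batch_size, 1) shapes are my addition and not part of the original answer:

    import numpy as np

    if __name__ == "__main__":
        model = create_model()
        model.compile(loss='mse', optimizer='RMSprop')
        model.summary()

        # One array per model input, each of shape (batch_size, 1).
        print(model.predict([np.array([[0.1]]), np.array([[0.1]])]))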