Tags: python, numpy, tensorflow, keras, keras-layer

InvalidArgumentError: In[0].dim(0) and In[1].dim(0) must be the same: [1,125,150] vs [32,150,125]


I'm trying to create a custom layer that merges two sources. I am receiving the error "InvalidArgumentError: In[0].dim(0) and In[1].dim(0) must be the same: [1,125,150] vs [32,150,125]". The code runs if I set batch_size to 1, so the shapes become [1,125,150] vs [1,150,125]; however, the loss then never changes, so that is not the root cause. I think I need to repeat the kernel to the batch size instead of just calling expand_dims.

import tensorflow as tf
import keras.backend as K
import keras.layers as L
import keras.initializers as INIT

class mergeLayer(L.Layer):
    def __init__(self, output_dim, **kwargs):
        self.output_dim = output_dim
        super(mergeLayer, self).__init__(**kwargs)
        self.kernel_initializer = INIT.get('uniform')

    def build(self, input_shape):
        # Create a trainable weight variable for this layer.
        self.kernel = self.add_weight(name='kernel',
                                      shape=input_shape[1:],
                                      initializer=self.kernel_initializer,
                                      trainable=True)
        super(mergeLayer, self).build(input_shape)  # Be sure to call this somewhere!

    def call(self, x):
        # expand_dims gives the kernel a batch size of 1, which clashes
        # with the batch size of x inside batch_dot
        temp = K.batch_dot(tf.expand_dims(self.kernel, 0),
                           tf.transpose(x, perm=[0, 2, 1])) + 1
        return temp

    def compute_output_shape(self, input_shape):
        return input_shape
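
For reference, the mismatch can be reproduced in isolation: K.batch_dot requires both operands to have the same first (batch) dimension, and expand_dims only ever gives the kernel a batch size of 1. A minimal sketch, assuming a TensorFlow backend and the shapes from the error message:

import numpy as np
import keras.backend as K

kernel = K.variable(np.zeros((125,150)))    # a weight with no batch axis
x = K.variable(np.zeros((32,125,150)))      # a batch of 32 inputs

a = K.expand_dims(kernel, 0)                # shape (1, 125, 150)
b = K.permute_dimensions(x, (0, 2, 1))      # shape (32, 150, 125)

# K.batch_dot(a, b) fails: In[0].dim(0) is 1 but In[1].dim(0) is 32;
# the kernel has to be repeated (tiled) to the batch size instead.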

Below is the code fitting the model. Again, if I change batch_size to 1 here, I can get the code to run, but the loss stays the same.

modelMerge.fit(x=[train1,train2],y=cats,epochs=100,batch_size=32,shuffle='batch')
score = modelMerge.evaluate(x=[test1,test2],y=cats,batch_size=32)

Output when batch_size is 1

Epoch 1/100
3903/3903 [=========================] - 45s - loss: 15.7062 - acc: 0.0254
Epoch 2/100
3903/3903 [=========================] - 43s - loss: 15.7050 - acc: 0.0254
Epoch 3/100
277/3903 [=>.......................] - ETA: 42s - loss: 15.8272 - acc: 0.0181

Thanks very much for your time and help.

Update: here is the Keras model structure that calls mergeLayer.

import keras.layers as L
import keras.models as M
import keras.optimizers as O

def buildModel_merge(numClasses):
    source = L.Input(shape=(64,25,1))
    x = L.Conv2D(150, (3,3), activation='relu', name='conv1a')(source)
    x = L.MaxPooling2D((2,2))(x)
    x = L.BatchNormalization()(x)
    x = L.Conv2D(150, (3,3), activation='relu', name='conv2a')(x)
    x = L.Conv2D(150, (5,5), activation='relu', name='conv3a')(x)
    x = L.Dropout(0.5)(x)
    # reshape into a dxN matrix
    x = L.Reshape((125,150))(x)
    x = mergeLayer(100)(x)

    source2 = L.Input(shape=(30,30,30,1))
    x2 = L.Conv3D(32,(5,5,5),strides=(2,2,2),activation='relu',name='conv1b')(source2)
    x2 = L.Dropout(0.2)(x2)
    x2 = L.Conv3D(32,(3,3,3),activation='relu',name='conv2b')(x2)
    x2 = L.MaxPooling3D(pool_size=(2,2,2),name='pool2b')(x2)
    x2 = L.Dropout(0.3)(x2)
    # reshape into a dxM matrix
    x2 = L.Reshape((125,32))(x2)
    x2 = mergeLayer(100)(x2)

    #x = L.Multiply(x, x2)(x)
    x = L.Multiply()([x,x2])

    x = L.Flatten()(x)
    x = L.Dense(400, activation='relu', name='dense1')(x) # Is relu used here?
    x = L.Dropout(0.5)(x)
    classify = L.Dense(numClasses, activation='softmax', name='dense2')(x)

    model = M.Model(inputs=[source,source2],outputs=classify)
    optimizer = O.SGD(momentum=0.02)
    model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['acc'])

    return model
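
As a side note, the two Reshape targets follow from the convolution arithmetic: the 2D branch ends at (25, 5, 150), which reshapes to (125, 150), and the 3D branch ends at (5, 5, 5, 32), which reshapes to (125, 32). A quick way to confirm the shapes (the class count below is only a placeholder):

modelMerge = buildModel_merge(numClasses=10)   # placeholder class count
modelMerge.summary()                           # prints each layer's output shape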

Solution

  • Here are some corrections to your code:

    • You don't need the output_dim and **kwargs arguments (the calls in buildModel_merge change accordingly; see the usage note after the code).
    • Instead of using expand_dims on the kernel, I defined it with the extra dimension from the start (but your Keras version seems to behave differently from mine, so use the #alternative lines of code if needed).
    • Main problem: batch_dot expects two tensors with the same batch size (that is, the first dimension must be the same).
      • Solved by repeating the kernel to match the batch size of x.
    • Replaced all tf functions with Keras backend functions (import keras.backend as K) - this is not required, but it lets you port the solution to other supported backends.

    class mergeLayer(Layer):

        #your init doesn't need output_dim and **kwargs
        def __init__(self):
            super(mergeLayer,self).__init__()
            self.kernel_initializer = INIT.get('uniform')

        def build(self, input_shape):
            # Create a trainable weight variable for this layer.
            self.kernel = self.add_weight(name='kernel',
                              #corrected shape to avoid expand_dims
                              shape=(1,)+input_shape[1:],
                                  #alternative:
                                  #shape=input_shape[1:],
                              initializer=self.kernel_initializer,
                              trainable=True)

            super(mergeLayer,self).build(input_shape) # Be sure to call this somewhere!

        def call(self, x):
            #take a tensor of ones with the same shape as x
            form = K.ones_like(x)

            #multiply the kernel by ones to repeat it along the batch axis of x
            kernel = form * self.kernel
                #alternative:
                #kernel = form * K.expand_dims(self.kernel,0)

            #used K.permute_dimensions instead of tf.transpose
            temp = K.batch_dot(kernel,K.permute_dimensions(x,(0,2,1)))+1
            return temp

        def compute_output_shape(self, input_shape):
            #batch_dot contracts the feature axes, so the output is
            #(batch, rows, rows), not the input shape
            return (input_shape[0], input_shape[1], input_shape[1])
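
    Since __init__ no longer takes output_dim, the two calls inside buildModel_merge become mergeLayer()(x) and mergeLayer()(x2). As a quick sanity check of the fixed layer, here is a minimal sketch (assuming the question's aliases, keras.layers as L and keras.models as M):

    import numpy as np

    inp = L.Input(shape=(125,150))
    out = mergeLayer()(inp)            # note: no output_dim argument anymore
    m = M.Model(inputs=inp, outputs=out)

    batch = np.random.rand(32,125,150).astype('float32')
    print(m.predict(batch).shape)      # (32, 125, 125) - batch size preserved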