I'm trying to make a network similar to the one in the image, but I'm not sure how it's done.
I want it to receive only one input and then feed it into two subnetworks made of convolutional blocks. I wrote this code, but it's not working:
main_model = Sequential()
main_model.add(Convolution2D(filters=16, kernel_size=(2, 2), input_shape=(32, 32, 3)))
main_model.add(BatchNormalization())
main_model.add(Activation('relu'))
main_model.add(MaxPooling2D(pool_size=(2, 2)))
main_model.add(Convolution2D(filters=32, kernel_size=(2, 2)))
main_model.add(BatchNormalization())
main_model.add(Activation('relu'))
main_model.add(MaxPooling2D(pool_size=(2, 2)))
main_model.add(Convolution2D(filters=64, kernel_size=(2, 2)))
main_model.add(BatchNormalization())
main_model.add(Activation('relu'))
main_model.add(MaxPooling2D(pool_size=(2, 2)))
main_model.add(Flatten())
# lower features model - CNN2
lower_model = Sequential()
lower_model.add(Convolution2D(filters=16, kernel_size=(1, 1), input_shape=(32, 32, 3)))
lower_model.add(BatchNormalization())
lower_model.add(Activation('relu'))
lower_model.add(MaxPooling2D(pool_size=(2, 2)))
lower_model.add(Flatten())
lower_model.add(Convolution2D(filters=32, kernel_size=(1, 1)))
lower_model.add(BatchNormalization())
lower_model.add(Activation('relu'))
lower_model.add(MaxPooling2D(pool_size=(2, 2)))
lower_model.add(Convolution2D(filters=64, kernel_size=(1, 1)))
lower_model.add(BatchNormalization())
lower_model.add(Activation('relu'))
lower_model.add(MaxPooling2D(pool_size=(2, 2)))
lower_model.add(Flatten())
# merged model
merged_model = concatenate([main_model, lower_model])
final_model = Sequential()
final_model.add(merged_model)
final_model.add(Dense(32))
final_model.add(Activation('relu'))
final_model.add(Dropout(0.5))
final_model.add(Dense(1))
final_model.add(Activation('sigmoid'))
final_model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
I'm getting this error:
ValueError: Input 0 of layer conv2d_4 is incompatible with the layer: expected ndim=4, found ndim=2. Full shape received: [None, 4096]
This is possible using the Keras Functional API. You can do something like this:
from tensorflow import keras
from tensorflow.keras.layers import (Convolution2D, BatchNormalization, Activation,
                                     MaxPooling2D, Flatten, Dense, concatenate)
from tensorflow.keras.models import Model

A_inputs = keras.Input(shape=(32, 32, 3))
B_inputs = keras.Input(shape=(32, 32, 3))
branchA = Convolution2D(filters=32, kernel_size=(1, 1))(A_inputs)
branchA = BatchNormalization()(branchA)
branchA = Activation('relu')(branchA)
branchA = MaxPooling2D(pool_size=(2, 2))(branchA)
branchA = Flatten()(branchA)  # flatten so the branch output is a vector
branchA = Model(inputs=A_inputs, outputs=branchA)
branchB = Convolution2D(filters=32, kernel_size=(1, 1))(B_inputs)
branchB = BatchNormalization()(branchB)
branchB = Activation('relu')(branchB)
branchB = MaxPooling2D(pool_size=(2, 2))(branchB)
branchB = Flatten()(branchB)  # flatten so the branch output is a vector
branchB = Model(inputs=B_inputs, outputs=branchB)
# make sure both branch outputs are flattened (or otherwise shape-compatible) before concatenating them
combined = concatenate([branchA.output, branchB.output])
combined = Dense(2, activation="relu")(combined)
combined = Dense(1, activation="sigmoid")(combined)  # sigmoid for a single binary output (softmax over one unit would always output 1)
model = Model(inputs=[branchA.input, branchB.input], outputs=combined)
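Since this model has two inputs, you would feed the same image batch to both branches when compiling and training it. A minimal usage sketch, assuming hypothetical NumPy arrays X_train and y_train:
model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
# X_train: images of shape (num_samples, 32, 32, 3); y_train: 0/1 labels -- both placeholders
model.fit([X_train, X_train], y_train, epochs=10, batch_size=32)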
Here is another tutorial that uses multiple branches; it uses two different inputs, but the rough process is the same.
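Since you want a single input shared by both branches, here is a minimal sketch of that variant (my adaptation, trimmed to one convolutional block per branch; the layer settings follow the code in your question):
from tensorflow import keras
from tensorflow.keras.layers import (Convolution2D, BatchNormalization, Activation,
                                     MaxPooling2D, Flatten, Dense, Dropout, concatenate)
from tensorflow.keras.models import Model

# One shared input tensor feeding both branches
inputs = keras.Input(shape=(32, 32, 3))

# Branch A: 2x2 convolutions (the "main" features in your code)
a = Convolution2D(filters=16, kernel_size=(2, 2))(inputs)
a = BatchNormalization()(a)
a = Activation('relu')(a)
a = MaxPooling2D(pool_size=(2, 2))(a)
a = Flatten()(a)

# Branch B: 1x1 convolutions (the "lower" features in your code)
b = Convolution2D(filters=16, kernel_size=(1, 1))(inputs)
b = BatchNormalization()(b)
b = Activation('relu')(b)
b = MaxPooling2D(pool_size=(2, 2))(b)
b = Flatten()(b)

# Merge the flattened branch outputs and classify
merged = concatenate([a, b])
merged = Dense(32, activation='relu')(merged)
merged = Dropout(0.5)(merged)
outputs = Dense(1, activation='sigmoid')(merged)

model = Model(inputs=inputs, outputs=outputs)
model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
Because this version has only one Input, you can train it with a single input array, e.g. model.fit(X_train, y_train, ...).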