I'm trying to create a LocallyConnected1D autoencoder in Keras by repurposing the 'simplest possible' Dense autoencoder from this tutorial. I keep getting the error below, which I believe is caused by the dimensions of my input_shape.
Traceback (most recent call last):
File "localdendritic.py", line 38, in <module>
kernel_size=6)
File "localdendritic.py", line 15, in __init__
activation='relu')(input_placeholder)
File "/Users/me/anaconda3/lib/python3.6/site-packages/keras/engine/topology.py", line 573, in __call__
self.assert_input_compatibility(inputs)
File "/Users/me/anaconda3/lib/python3.6/site-packages/keras/engine/topology.py", line 472, in assert_input_compatibility
str(K.ndim(x)))
ValueError: Input 0 is incompatible with layer encoded_layer: expected ndim=3, found ndim=2
My code is below. I've tried changing the input_shape array to [None, 1, input_size], [1, 1, input_size], [1, input_size], and [None, input_size], but it doesn't seem to change anything. I think I'm missing some insight about the shape of the input.
import numpy as np
from keras.models import Model, Sequential
from keras.layers import Input, LocallyConnected1D


class Localautoencoder:
    def __init__(self, input_size, encoded_size, kernel_size, **kwargs):
        input_shape = np.array([input_size])
        input_placeholder = Input(shape=(input_size, 1))
        encoded = LocallyConnected1D(encoded_size, kernel_size,
                                     input_shape=input_shape,
                                     name='encoded_layer',
                                     activation='relu')(input_placeholder)
        decoded = LocallyConnected1D(input_size, kernel_size,
                                     activation='sigmoid',
                                     name='decoded_layer')(encoded)
        self.localae = Model(input_placeholder, decoded)
        self.encoder = Model(input_placeholder, encoded)
        encoded_input = Input(shape=(1, encoded_size))
        decoded_layer = self.localae.layers[-1]
        self.decoder = Model(encoded_input, decoded_layer(encoded_input))
        self.localae.compile(optimizer='adam', loss='binary_crossentropy')
from keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.astype('float32')/255.
x_test = x_test.astype('float32')/255.
x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))
print(x_train.shape)
print(x_test.shape)
lae = Localautoencoder(input_size=x_train.shape[1],
                       encoded_size=100,
                       kernel_size=6)
A LocallyConnected1D layer expects a three-dimensional input of shape (batch, steps, channels), but your input_placeholder only has two dimensions, which is why Keras reports expected ndim=3, found ndim=2. One fix is to add a Reshape layer that turns the 2D input into a 3D one, as sketched below.
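Here is a minimal sketch of that fix, assuming the flat 784-element MNIST vectors from your question are fed in unchanged; the encoded_size and kernel_size values are copied from your code, and the Reshape layer is the only new piece:

import numpy as np
from keras.models import Model
from keras.layers import Input, Reshape, LocallyConnected1D

input_size = 784    # flattened MNIST image, as in your reshape of x_train
encoded_size = 100
kernel_size = 6

# 2D placeholder: (batch, input_size) -- this alone would trigger ndim=2
input_placeholder = Input(shape=(input_size,))

# Reshape to 3D: (batch, input_size, 1) so LocallyConnected1D will accept it
reshaped = Reshape((input_size, 1))(input_placeholder)

encoded = LocallyConnected1D(encoded_size, kernel_size,
                             name='encoded_layer',
                             activation='relu')(reshaped)

encoder = Model(input_placeholder, encoded)
encoder.summary()

# x_train from your code, shape (60000, 784), can now be fed in directly:
# features = encoder.predict(x_train)

One thing to keep in mind: in the Keras version your traceback points to, LocallyConnected1D only supports 'valid' padding, so the length dimension shrinks at every layer (784 -> 779 here with kernel_size=6). You will need to account for that on the decoding side before the reconstruction can be compared against the original 784-element vectors.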