Tags: tensorflow, keras, deep-learning, autoencoder, image-generation

Keras Variational Autoencoder with ImageDataGenerator returns InvalidArgumentError: Graph execution error


I am trying to build an autoencoder for image generation, but my first prototype refuses to work. The main problem comes from the data generator passed to vae.fit(image_generator). It should feed the training (and validation) data, but there seems to be a mismatch between input and output shapes.

Model code:

from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import backend as K

def sampling(args):
    # Reparameterization trick: z = mu + sigma * epsilon, with epsilon ~ N(0, 1)
    mu, log_var = args
    batch_size = K.shape(mu)[0]
    dim = K.int_shape(mu)[1]
    epsilon = K.random_normal(shape=(batch_size, dim), mean=0., stddev=1.0)
    return K.exp(0.5 * log_var) * epsilon + mu

def build_vae(input_count,neuron_count_per_hidden_layer,encoded_dim,hidden_activation,output_activation):
    #Encoder
    encoder_input = layers.Input(shape=(80,60,1), name='encoder_input')
    prev_layer=encoder_input

    prev_layer = layers.Flatten()(prev_layer)
    prev_layer = layers.Dense(256,activation=hidden_activation)(prev_layer)
    for neuron_count in neuron_count_per_hidden_layer:
      hidden_layer=layers.Dense(neuron_count,activation=hidden_activation)(prev_layer)
      prev_layer=hidden_layer
    
    mu = layers.Dense(encoded_dim, name='mu')(prev_layer)
    log_var = layers.Dense(encoded_dim, name='log_var')(prev_layer)
    
    encoder = keras.Model(encoder_input, [mu, log_var], name='encoder')

    #Decoder
    decoder_input = layers.Input(shape=(encoded_dim,), name='decoder_input')

    prev_layer=decoder_input
    for neuron_count in reversed(neuron_count_per_hidden_layer):
      hidden_layer=layers.Dense(neuron_count,activation=hidden_activation)(prev_layer)
      prev_layer=hidden_layer

    prev_layer = layers.Dense(4800,activation=hidden_activation)(prev_layer)
    decoder_output_layer=layers.Reshape((80,60,1), name='decoder_output')(prev_layer)

    decoder = keras.Model(decoder_input, decoder_output_layer, name='decoder')

    #Sampling layer
    s = layers.Lambda(sampling, output_shape=(encoded_dim,), name='s')([mu, log_var])

    #VAE
    vae=keras.Model(encoder.input, decoder(s),name='vae')
    
    return vae,encoder,decoder

def vae_loss(vae_input,vae_ouput,mu,log_var,kl_coefficient, input_count):
  #Reconstruction loss
  reconstruction_loss = keras.losses.mean_squared_error(vae_input,vae_ouput) * input_count

  #Regularization loss
  kl_loss = 0.5 * K.sum(K.square(mu) + K.exp(log_var) - log_var - 1, axis = -1)

  #Combined loss
  return reconstruction_loss + kl_coefficient*kl_loss

def getVAE(shape):
   vae, vae_encoder, vae_decoder=build_vae(shape, [128,64], 16,'sigmoid','sigmoid')
   vae.summary()
   return vae, vae_encoder, vae_decoder

It gives this model summary: [model summary screenshot]
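As an extra check (a sketch I did not include originally), the sub-model output shapes can be inspected directly:

vae, vae_encoder, vae_decoder = getVAE(4800)

# Encoder: (80, 60, 1) image -> two 16-dimensional vectors (mu and log_var)
print(vae_encoder.output_shape)  # [(None, 16), (None, 16)]

# Decoder: 16-dimensional latent code -> (80, 60, 1) image
print(vae_decoder.output_shape)  # (None, 80, 60, 1)

# The full VAE should reproduce the input shape
print(vae.output_shape)          # (None, 80, 60, 1)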


Then I create the model and the image generator, and try to train the model:

from tensorflow.keras.preprocessing.image import ImageDataGenerator

datagen = ImageDataGenerator(
    rescale=1.0 / 255.0,  # Scale pixel values between 0 and 1
)

data_dir = paths.BW_IMG_FOLDER
batch_size = 128

image_generator = datagen.flow_from_directory(
    data_dir,
    color_mode="grayscale",
    target_size=(80, 60),  # Set your desired image dimensions
    batch_size=batch_size,
    class_mode='input',  # No class labels, unsupervised learning
    shuffle=True  # Shuffle the data
)
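As a quick sanity check (not part of my original script), one batch can be pulled from the generator to confirm its shapes:

# With class_mode='input', each batch is an (x, x) pair: the images serve as their own targets
x_batch, y_batch = next(image_generator)
print(x_batch.shape, y_batch.shape)  # expected: (128, 80, 60, 1) for both (the last batch may be smaller)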


importlib.reload(v1) # the module "v1" was described in previous code sample
vae, vae_encoder, vae_decoder = v1.getVAE(4800)


kl_coefficient=1

#Information needed to compute the loss function
vae_input=vae.input
vae_output=vae.output
mu=vae.get_layer('mu').output
log_var=vae.get_layer('log_var').output

vae.add_loss(v1.vae_loss(vae_input,vae_output,mu,log_var,kl_coefficient,4800))
vae.compile(optimizer='adam')

epoch_count = 100
patience=5

early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=patience, restore_best_weights=True)

history = vae.fit(image_generator,epochs=epoch_count,callbacks=[early_stop])

But as a result I get this error:

InvalidArgumentError                      Traceback (most recent call last)
c:\Users\dengr\Desktop\Fashion-Product-Images-Generation\src\main.py in line 8
      133 patience=5
      135 early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=patience, restore_best_weights=True)
----> 137 history = vae.fit(image_generator,epochs=epoch_count,callbacks=[early_stop])

File c:\Users\dengr\anaconda3\envs\DL-10\lib\site-packages\keras\utils\traceback_utils.py:70, in filter_traceback..error_handler(*args, **kwargs)
     67     filtered_tb = _process_traceback_frames(e.__traceback__)
     68     # To get the full stack trace, call:
     69     # `tf.debugging.disable_traceback_filtering()`
---> 70     raise e.with_traceback(filtered_tb) from None
     71 finally:
     72     del filtered_tb

File c:\Users\dengr\anaconda3\envs\DL-10\lib\site-packages\tensorflow\python\eager\execute.py:54, in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
     52 try:
     53   ctx.ensure_initialized()
---> 54   tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
     55                                       inputs, attrs, num_outputs)
     56 except core._NotOkStatusException as e:
     57   if name is not None:

InvalidArgumentError: Graph execution error:
Detected at node 'vae/tf.__operators__.add_1/AddV2' defined at (most recent call last):
    File "c:\Users\user\anaconda3\envs\DL-10\lib\runpy.py", line 196, in _run_module_as_main
      return _run_code(code, main_globals, None,
    File "c:\Users\user\anaconda3\envs\DL-10\lib\runpy.py", line 86, in _run_code
      exec(code, run_globals)
...
...
...
   File "c:\Users\user\anaconda3\envs\DL-10\lib\site-packages\keras\utils\traceback_utils.py", line 96, in error_handler
      return fn(*args, **kwargs)
    File "c:\Users\user\anaconda3\envs\DL-10\lib\site-packages\keras\layers\core\tf_op_layer.py", line 242, in _call_wrapper
      return self._call_wrapper(*args, **kwargs)
    File "c:\Users\user\anaconda3\envs\DL-10\lib\site-packages\keras\layers\core\tf_op_layer.py", line 279, in _call_wrapper
      result = self.function(*args, **kwargs)
Node: 'vae/tf.__operators__.add_1/AddV2'
required broadcastable shapes
     [[{{node vae/tf.__operators__.add_1/AddV2}}]] [Op:__inference_train_function_1412]

On the web I found answers only for convolutional classification networks, but none for 2D autoencoders.


Solution

  • With the help of MksimSH's response I resolved the problem, which was in the loss function code:

    def vae_loss(vae_input,vae_ouput,mu,log_var,kl_coefficient, input_count):
      #Reconstruction loss
      reconstruction_loss = keras.losses.mean_squared_error(vae_input,vae_ouput) * input_count
    
      #Regularization loss
      kl_loss = 0.5 * K.sum(K.square(mu) + K.exp(log_var) - log_var - 1, axis = -1)
    
      #Combined loss
      return reconstruction_loss + kl_coefficient*kl_loss
    

    I had to reshape the tensors inside the loss function, because keras.losses.mean_squared_error() does not compute a single mean value per sample for multi-dimensional data: it averages only over the last axis, so for (batch, 80, 60, 1) tensors it returns a (batch, 80, 60) result, while the KL term has shape (batch,). Adding the two is exactly what triggers the "required broadcastable shapes" error above.
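    For example, a small standalone check (toy tensors, not project data) of the reduction axis:

    import tensorflow as tf
    from tensorflow import keras

    a = tf.zeros((2, 80, 60, 1))
    b = tf.ones((2, 80, 60, 1))

    # mean_squared_error averages only over the last axis...
    print(keras.losses.mean_squared_error(a, b).shape)  # (2, 80, 60)

    # ...so the tensors must be flattened to (batch, 4800) first
    # to get one reconstruction value per sample
    print(keras.losses.mean_squared_error(tf.reshape(a, (2, -1)),
                                          tf.reshape(b, (2, -1))).shape)  # (2,)

    Flattening first gives the reconstruction term the shape (batch,), which can be added to the KL term. So the final working code is: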

    def vae_loss(vae_input,vae_ouput,mu,log_var,kl_coefficient, input_count):
      #Reconstruction loss
      x = keras.layers.Reshape((input_count,))(vae_input)
      y = keras.layers.Reshape((input_count,))(vae_ouput)
      reconstruction_loss = keras.losses.mean_squared_error(x, y) * input_count
    
      #Regularization loss
      kl_loss = 0.5 * K.sum(K.square(mu) + K.exp(log_var) - log_var - 1, axis = -1)
    
      #Combined loss
      return reconstruction_loss + kl_coefficient*kl_loss
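
    An equivalent variant (an untested sketch, not part of MksimSH's answer) flattens inside the loss with K.batch_flatten instead of adding Reshape layers:

    def vae_loss(vae_input,vae_ouput,mu,log_var,kl_coefficient, input_count):
      #Reconstruction loss: batch_flatten turns (batch, 80, 60, 1) into (batch, 4800),
      #so mean_squared_error yields one value per sample
      x = K.batch_flatten(vae_input)
      y = K.batch_flatten(vae_ouput)
      reconstruction_loss = keras.losses.mean_squared_error(x, y) * input_count

      #Regularization loss
      kl_loss = 0.5 * K.sum(K.square(mu) + K.exp(log_var) - log_var - 1, axis = -1)

      #Combined loss
      return reconstruction_loss + kl_coefficient*kl_loss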