Tags: python, keras, deep-learning, neural-network, lstm

Python: can't apply fit_generator to a Keras model with multiple inputs


I have the following model: an LSTM + CNN with 3 inputs (see the attached model diagram).

And I built this data generator class to train the model with fit_generator (based on this guide: https://stanford.edu/~shervine/blog/keras-how-to-generate-data-on-the-fly):

class MultiInputDataGenerator(keras.utils.Sequence):
    'Generates data for Keras'

    def __init__(self, list_IDs, labels, shuffle=True):
        'Initialization'
        self.batch_size = 8
        self.labels = labels
        self.list_IDs = list_IDs
        self.n_classes = 5
        self.shuffle = shuffle
        self.on_epoch_end()

    def __len__(self):
        'Denotes the number of batches per epoch'
        return int(np.floor(len(self.list_IDs) / self.batch_size))

    def __getitem__(self, index):
        'Generate one batch of data'
        # Generate indexes of the batch
        indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]

        # Find list of IDs
        list_IDs_temp = [self.list_IDs[k] for k in indexes]

        # Generate data
        X, y = self.__data_generation(list_IDs_temp)

        return X, y

    def on_epoch_end(self):
        'Updates indexes after each epoch'
        self.indexes = np.arange(len(self.list_IDs))
        if self.shuffle == True:
            np.random.shuffle(self.indexes)

    def __data_generation(self, list_IDs_temp):
        'Generates data containing batch_size samples'
        # Initialization
        X = np.empty((self.batch_size, 1, 3), dtype=object)
        y = np.empty((self.batch_size), dtype=object)

        # Generate data
        for i, ID in enumerate(list_IDs_temp):
            X_id = []
            x_features = df.iloc[ID][et_cols].values  # et_cols are 14 columns, so I get 1x14 here
            x_text = df.iloc[ID].text_col             # x_text is 1x768
            x_vid = df.iloc[ID].frame_col             # x_vid is (3, 244, 244)

            X_id.append(x_features)
            X_id.append(x_text)
            X_id.append(x_vid)

            X[i,] = X_id
            y[i] = self.labels[ID]

        y_mat = tf.convert_to_tensor(pd.get_dummies(y))
        return X, y_mat


training_generator = MultiInputDataGenerator(generator_partition['train'], generator_labels)
validation_generator = MultiInputDataGenerator(generator_partition['val'], generator_labels)
net = build_LSTMCNN_net()
net.compile(keras.optimizers.Adam(0.001),'categorical_crossentropy',metrics=['acc'])
net.fit_generator(generator=training_generator,
                  validation_data=validation_generator)
                  # use_multiprocessing=True, workers=6

And I get the error:

---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-38-669153f703e6> in <module>()
      
      net.fit_generator(generator=training_generator,
--->                      validation_data=validation_generator,)
                          #use_multiprocessing=True)#,    workers=6)

/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/constant_op.py in convert_to_eager_tensor(value, ctx, dtype)
     96       dtype = dtypes.as_dtype(dtype).as_datatype_enum
     97   ctx.ensure_initialized()
---> 98   return ops.EagerTensor(value, ctx.device_name, dtype)
     99 
    100 

ValueError: Failed to convert a NumPy array to a Tensor (Unsupported object type numpy.ndarray).

I also tried several variations such as adding:

x_features = np.asarray(x_features).astype(object)
x_text = np.asarray(x_text).astype(object)
x_vid = np.asarray(x_vid).astype(object)

Or X[i,] = [X_id] instead of X[i,] = X_id. But none of these worked. Any idea how to fix the problem?

Edit: when adding

astype(np.float32)

and

tf.convert_to_tensor(X)

I get the error:

---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
in ()

      net.fit_generator(generator=training_generator,
--->                      validation_data=validation_generator,
                          use_multiprocessing=True,    workers=6)


/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/constant_op.py in convert_to_eager_tensor(value, ctx, dtype)
            dtype = dtypes.as_dtype(dtype).as_datatype_enum
        ctx.ensure_initialized()
--->    return ops.EagerTensor(value, ctx.device_name, dtype)
  

Solution

  • Before solving the problem, let's first summarize the dataset you're working with. Based on your description, I created an example DataFrame that might resemble yours:

    import numpy as np
    import pandas as pd
    from sklearn.model_selection import train_test_split

    dataset_size = 500
    train_idx,val_idx = train_test_split(range(dataset_size),test_size=0.2,)
    
    # create an example DataFrame that I assume resembles yours
    example_df = pd.DataFrame({'vids':np.random.randint(0,10000,dataset_size)})
    # create feature columns 
    for ind in range(14): example_df['feature_%i' % ind] = np.random.rand(dataset_size)
    # each cell contains a list 
    example_df['text'] = np.random.randint(dataset_size)
    example_df['text'] = example_df['text'].astype('object')
    for ind in range(dataset_size):example_df.at[ind,'text'] = np.random.rand(768).tolist()
    # create the label column
    example_df['label'] = np.random.randint(low=0,high=5,size=dataset_size)
    
    # extract information from the dataframe, and create data generators 
    all_vids = example_df['vids'].values
    feature_columns = ['feature_%i' % ind for ind in range(14)]
    all_features = example_df[feature_columns].values
    all_text = example_df['text'].values
    all_labels = example_df['label'].values
    

    As you can see, the column text is a column of lists, where each list contains 768 items. The column label contains the labels of the examples; it doesn't matter whether you use one-hot encoding or another type of encoding, as long as its shape matches the shape of the output layer of the overall neural network model. The column vids is a column of seeds for generating random images on the fly.
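
    A quick sanity check on this example DataFrame (the expected outputs below simply follow from the construction above):

    # 500 rows; columns: vids + 14 features + text + label
    print(example_df.shape)                      # (500, 17)
    # each text cell is a list of 768 values
    print(len(example_df.at[0, 'text']))         # 768
    # labels are drawn from {0, ..., 4}
    print(sorted(example_df['label'].unique()))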


    Solving the problem (based on the above dataset)

    You can have the __getitem__ method return {'feature':features,'text':text,'vid':vid},y instead of stacking the three input arrays into one array.
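
    Why stacking fails: packing inputs of shapes (14,), (768,) and (3,244,244) into a single NumPy array forces dtype=object, and TensorFlow cannot convert an object-dtype array into a dense tensor. A minimal standalone sketch of the failure (not part of your model):

    import numpy as np
    import tensorflow as tf

    # three inputs with different shapes can only coexist as dtype=object
    row = [np.zeros(14), np.zeros(768), np.zeros((3, 244, 244))]
    X = np.empty((1, 1, 3), dtype=object)
    X[0, 0] = row

    # raises: ValueError: Failed to convert a NumPy array to a Tensor
    # (Unsupported object type numpy.ndarray)
    try:
        tf.convert_to_tensor(X)
    except ValueError as err:
        print(err)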

    To explain this, let's first construct a toy model that resembles yours:

    from tensorflow.keras.models import Model
    from tensorflow.keras.layers import Input,Dense,Flatten,Add
    
    
    def features_part(x):
        y = Dense(14)(x)
        y = Dense(10,activation='linear')(y)
        return y
    
    def text_part(x):
        y = Dense(768)(x)
        y = Dense(10,activation='linear')(y)
        return y
    
    def vid_part(x):
        y = Flatten()(x)
        y = Dense(10,activation='linear')(y)
        return y
    
    input_features = Input(shape=(14,),name='feature')
    input_text = Input(shape=(768,),name='text')
    input_vid = Input(shape=(3,244,244,),name='vid')
    
    feature_block = features_part(input_features)
    text_block = text_part(input_text)
    vid_block = vid_part(input_vid)
    added = Add()([feature_block,text_block,vid_block])
    # your real model has five classes; this toy model uses a single linear output just to test the data flow
    pred = Dense(1)(added)
    # build model
    model = Model(inputs=[input_features,input_text,input_vid],outputs=pred)
    model.compile(loss='mae',optimizer='adam',metrics=['mae'])
    

    The most important thing about this model is that I specified the names of the three input layers:

    input_features = Input(shape=(14,),name='feature')
    input_text = Input(shape=(768,),name='text')
    input_vid = Input(shape=(3,244,244,),name='vid')
    

    For this model, you can construct a generator like this:

    # provide a seed for generating a random image 
    def fn2img(seed):
        np.random.seed(seed)
        # fake an image with three channels 
        return np.random.randint(low=0,high=255,size=(3,244,244))
    
    
    class MultiInputDataGenerator(keras.utils.Sequence):
    
        def __init__(self, 
                     all_inds,labels, 
                     features,text,vid, 
                     shuffle=True):
            self.batch_size = 8
            self.labels = labels
            self.all_inds = all_inds
            self.shuffle = shuffle
            self.on_epoch_end()
            
            self.features = features
            self.text = text
            self.vid = vid
    
        def __len__(self): 
            return int(np.floor(len(self.all_inds) / self.batch_size))
    
    
        def __getitem__(self,index):
            indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
            batch_indices = [self.all_inds[k] for k in indexes]
            features,text,vid,y = self.__data_generation(batch_indices)
    
            return {'feature':features,'text':text,'vid':vid},y
    
        def on_epoch_end(self):
            self.indexes = np.arange(len(self.all_inds))
            if self.shuffle == True:
                np.random.shuffle(self.indexes)
    
        def __data_generation(self,batch_indices):
            # Generate data
            features = self.features[batch_indices,:]
            # note that you need to stack the slice in order to reshape it to (num_samples,768)
            text = np.stack(self.text[batch_indices])
            # since batch_size is not a super large number, you can stack here
            vid = np.stack([fn2img(seed) for seed in self.vid[batch_indices]])
            y = self.labels[batch_indices]
    
            return features,text,vid,y
    

    As you can see, the __getitem__ method returns a dictionary {'feature':features,'text':text,'vid':vid} together with y. The keys of the dictionary match the names of the three input layers, and the random images are generated on the fly.
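
    Once the generator is instantiated (as in the full script below), you can check that one batch has exactly the shapes the Input layers expect:

    batch_x, batch_y = training_generator[0]
    print(batch_x['feature'].shape)  # (8, 14)
    print(batch_x['text'].shape)     # (8, 768)
    print(batch_x['vid'].shape)      # (8, 3, 244, 244)
    print(batch_y.shape)             # (8,)

    As a side note, on recent TensorFlow versions fit_generator is deprecated, and model.fit accepts a keras.utils.Sequence directly.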

    To make sure everything works end to end, you can run the full script below:

    import numpy as np
    import pandas as pd
    from tensorflow import keras 
    from sklearn.model_selection import train_test_split
    
    from tensorflow.keras.models import Model
    from tensorflow.keras.layers import Input,Dense,Flatten,Add
    
    
    # provide a seed for generating a random image
    def fn2img(seed):
        np.random.seed(seed)
        # fake an image with three channels
        return np.random.randint(low=0,high=255,size=(3,244,244))
    
    
    class MultiInputDataGenerator(keras.utils.Sequence):
    
        def __init__(self,
                     all_inds,labels,
                     features,text,vid,
                     shuffle=True):
            self.batch_size = 8
            self.labels = labels
            self.all_inds = all_inds
            self.shuffle = shuffle
            self.on_epoch_end()
            
            self.features = features
            self.text = text
            self.vid = vid
    
        def __len__(self):
            return int(np.floor(len(self.all_inds) / self.batch_size))
    
    
        def __getitem__(self,index):
            indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
            batch_indices = [self.all_inds[k] for k in indexes]
            features,text,vid,y = self.__data_generation(batch_indices)
    
            return {'feature':features,'text':text,'vid':vid},y
    
        def on_epoch_end(self):
            self.indexes = np.arange(len(self.all_inds))
            if self.shuffle == True:
                np.random.shuffle(self.indexes)
    
        def __data_generation(self,batch_indices):
            # Generate data
            features = self.features[batch_indices,:]
            # note that you need to stack the slice in order to reshape it to (num_samples,768)
            text = np.stack(self.text[batch_indices])
            # since batch_size is not a super large number, you can stack here
            vid = np.stack([fn2img(seed) for seed in self.vid[batch_indices]])
            y = self.labels[batch_indices]
    
            return features,text,vid,y
    
    
    # fake a dataset
    dataset_size = 500
    train_idx,val_idx = train_test_split(range(dataset_size),test_size=0.2,)
    
    # create an example DataFrame that I assume resembles yours
    example_df = pd.DataFrame({'vids':np.random.randint(0,10000,dataset_size)})
    # create feature columns
    for ind in range(14): example_df['feature_%i' % ind] = np.random.rand(dataset_size)
    # each cell contains a list
    example_df['text'] = np.random.randint(dataset_size)
    example_df['text'] = example_df['text'].astype('object')
    for ind in range(dataset_size):example_df.at[ind,'text'] = np.random.rand(768).tolist()
    # create the label column
    example_df['label'] = np.random.randint(low=0,high=5,size=dataset_size)
    
    # extract information from the dataframe, and create data generators
    all_vids = example_df['vids'].values
    feature_columns = ['feature_%i' % ind for ind in range(14)]
    all_features = example_df[feature_columns].values
    all_text = example_df['text'].values
    all_labels = example_df['label'].values
    
    training_generator = MultiInputDataGenerator(train_idx,all_labels,all_features,all_text,all_vids)
    
    # create model
    def features_part(x):
        y = Dense(14)(x)
        y = Dense(10,activation='linear')(y)
        return y
    
    def text_part(x):
        y = Dense(768)(x)
        y = Dense(10,activation='linear')(y)
        return y
    
    def vid_part(x):
        y = Flatten()(x)
        y = Dense(10,activation='linear')(y)
        return y
    
    input_features = Input(shape=(14,),name='feature')
    input_text = Input(shape=(768,),name='text')
    input_vid = Input(shape=(3,244,244,),name='vid')
    
    feature_block = features_part(input_features)
    text_block = text_part(input_text)
    vid_block = vid_part(input_vid)
    added = Add()([feature_block,text_block,vid_block])
    # your real model has five classes; this toy model uses a single linear output just to test the data flow
    pred = Dense(1)(added)
    # build model
    model = Model(inputs=[input_features,input_text,input_vid],outputs=pred)
    model.compile(loss='mae',optimizer='adam',metrics=['mae'])
    
    model.fit_generator(generator=training_generator,epochs=10)
    
    print(model.history.history)
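
    If you also want to verify inference, prediction accepts the same dictionary format keyed by input-layer names (a small sketch reusing the arrays defined above):

    # build one small batch of two samples in the same dictionary format
    sample = {'feature': all_features[:2],
              'text':    np.stack(all_text[:2]),
              'vid':     np.stack([fn2img(s) for s in all_vids[:2]])}
    print(model.predict(sample).shape)  # (2, 1) for this toy model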