Tags: python, tensorflow, keras, lstm

Fine-tuning Universal Sentence Encoder with LSTM


The input data looks like this:

string_1_A, string_2_A, string_3_A, label_A
string_1_B, string_2_B, string_3_B, label_B
...
string_1_Z, string_2_Z, string_3_Z, label_Z

I would like to use the Universal Sentence Encoder (v4) to get an embedding for each string (they will be sentences) and then feed the sequence of embeddings into an LSTM to make a prediction about that sequence. I ended up with the code below:

import tensorflow_hub as hub
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras.layers import LSTM


module_url = "../resources/embeddings/use-4"

def get_lstm_model():
    embedding_layer = hub.KerasLayer(module_url)

    # 3 strings per sample; the Lambda is meant to embed each string with USE
    inputs = tf.keras.layers.Input(shape=(3, ), dtype=tf.string)
    x = tf.keras.layers.Lambda(lambda y: tf.expand_dims(embedding_layer(tf.squeeze(y)), 1))(inputs)
    x = LSTM(128, return_sequences=False)(x)
    outputs = tf.keras.layers.Dense(1, activation="sigmoid")(x)

    model = tf.keras.Model(inputs=inputs, outputs=outputs)
    model.compile("adam",  K.binary_crossentropy)
    model.summary()
    return model


if __name__ == '__main__':
    model = get_lstm_model()
    print(model.predict([[["a"], ["b"], ["c"]]]))

The problem is that the dimensions of the input/output of certain layers do not match what I expected (instead of 1 in the Lambda layer's output, I would expect 3):

input_1 (InputLayer)         [(None, 3)]               0         
_________________________________________________________________
lambda (Lambda)              (None, ***1***, 512)            0       

Any suggestions? I think I need to handle the squeezing and unsqueezing better.


Solution

  • The simplest solution is to pass each string/sentence separately into the Universal Sentence Encoder. This produces an embedding of shape (512,) for each string/sentence; these per-sentence embeddings can then be concatenated into a tensor of shape (None, n_sentences, 512).

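    For example, a quick sanity check of the encoder's output shape (a minimal sketch; the two example sentences are made up, and the module is loaded from tfhub.dev as in the model below):

    import tensorflow as tf
    import tensorflow_hub as hub

    encoder = hub.KerasLayer("https://tfhub.dev/google/universal-sentence-encoder/4")
    embeddings = encoder(tf.constant(["first example sentence", "second example sentence"]))
    print(embeddings.shape)  # (2, 512): one 512-dimensional vector per sentence
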
    This is the code of the model:

    import tensorflow as tf
    import tensorflow_hub as hub
    from tensorflow.keras import Model
    from tensorflow.keras.layers import Input, Reshape, Concatenate, LSTM, Dense

    n_sentences = 50
    module_url = "https://tfhub.dev/google/universal-sentence-encoder/4"

    def get_lstm_model():
        # trainable=True so the USE weights are fine-tuned together with the LSTM
        embedding_layer = hub.KerasLayer(module_url, trainable=True)

        input = Input(shape=(n_sentences,), dtype=tf.string)
        # embed each sentence separately: (None,) strings -> (None, 512) -> (None, 1, 512)
        x = [Reshape((1, 512))(embedding_layer(input[:, s])) for s in range(n_sentences)]
        # stack the per-sentence embeddings into (None, n_sentences, 512)
        x = Concatenate(axis=1)(x)
        x = LSTM(128, return_sequences=False)(x)
        output = Dense(1, activation="sigmoid")(x)

        model = Model(inputs=input, outputs=output)
        model.compile("adam", "binary_crossentropy")
        model.summary()
        return model

    At inference time:

    sentences = [str(i) for i in range(n_sentences)]
    X = [sentences]                       # 1 sample
    print(model.predict(X).shape)         # (1, 1)

    X = [sentences, sentences[::-1]]      # 2 samples
    print(model.predict(X).shape)         # (2, 1)
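
    Since the encoder is loaded with trainable=True, calling fit fine-tunes the USE weights together with the LSTM. A minimal training sketch on made-up data (the toy sentences, labels, and number of samples are assumptions purely for illustration):

    import numpy as np

    model = get_lstm_model()

    # 8 toy samples, each a sequence of n_sentences strings, with one binary label per sample
    X_train = np.array([[f"sample {i} sentence {s}" for s in range(n_sentences)]
                        for i in range(8)])
    y_train = np.random.randint(0, 2, size=(8, 1)).astype("float32")

    model.fit(X_train, y_train, epochs=2, batch_size=4)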
    

    Here is the running notebook.