python, tensorflow, tensorflow2.0, tensorflow-datasets

Adapt a numerical TensorFlow dataset as a TextVectorization input


Consider the following code:

import numpy as np
import tensorflow as tf

simple_data_samples = np.array([
         [1, 1, 1, -1, -1],
         [2, 2, 2, -2, -2],
         [3, 3, 3, -3, -3],
         [4, 4, 4, -4, -4],
         [5, 5, 5, -5, -5],
         [6, 6, 6, -6, -6],
         [7, 7, 7, -7, -7],
         [8, 8, 8, -8, -8],
         [9, 9, 9, -9, -9],
         [10, 10, 10, -10, -10],
         [11, 11, 11, -11, -11],
         [12, 12, 12, -12, -12],
])

def timeseries_dataset_multistep_combined(features, label_slice, input_sequence_length, output_sequence_length, batch_size):
    # Build sliding windows that cover both the input and the output part.
    feature_ds = tf.keras.preprocessing.timeseries_dataset_from_array(
        features, None, input_sequence_length + output_sequence_length, batch_size=batch_size)

    def split_feature_label(x):
        # Convert the numeric values to strings so they can later be treated as text.
        x = tf.strings.as_string(x)
        # The first input_sequence_length steps are the input, the rest is the label.
        return x[:, :input_sequence_length, :], x[:, input_sequence_length:, label_slice]

    feature_ds = feature_ds.map(split_feature_label)

    return feature_ds

ds = timeseries_dataset_multistep_combined(simple_data_samples, slice(None, None, None),
                                           input_sequence_length=4, output_sequence_length=2, batch_size=1)
def print_dataset(ds):
    for inputs, targets in ds:
        print("---Batch---")
        print("Feature:", inputs.numpy())
        print("Label:", targets.numpy())
        print("")



print_dataset(ds)
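
Running print_dataset(ds) shows that, because of tf.strings.as_string, each batch consists of string tensors; the first batch looks roughly like this:

---Batch---
Feature: [[[b'1' b'1' b'1' b'-1' b'-1']
  [b'2' b'2' b'2' b'-2' b'-2']
  [b'3' b'3' b'3' b'-3' b'-3']
  [b'4' b'4' b'4' b'-4' b'-4']]]
Label: [[[b'5' b'5' b'5' b'-5' b'-5']
  [b'6' b'6' b'6' b'-6' b'-6']]]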

The tensorflow dataset "ds" consists of an input and target. I want to adapt the input and the target as a textvector. The following hypothetical code displays what I want to achieve:

from tensorflow.keras import layers

input_vectorization = layers.TextVectorization(
    max_tokens=20,
    output_mode="int",
    output_sequence_length=6,
)
target_vectorization = layers.TextVectorization(
    max_tokens=20,
    output_mode="int",
    output_sequence_length=6 + 1
)

input_vectorization.adapt(ds.input)
target_vectorization.adapt(ds.target)

Any idea how to code this using the example above?


Solution

  • If I understood you correctly, you can use your existing dataset with the TextVectorization layer like this:

    import tensorflow as tf
    
    input_vectorization = tf.keras.layers.TextVectorization(
        max_tokens=20,
        output_mode="int",
        output_sequence_length=6,
    )
    target_vectorization = tf.keras.layers.TextVectorization(
        max_tokens=20,
        output_mode="int",
        output_sequence_length=6 + 1
    )
    
    # Get inputs only and flatten them
    inputs = ds.map(lambda x, y: tf.reshape(x, (tf.math.reduce_prod(tf.shape(x)), )))
    
    # Get targets only and flatten them
    targets = ds.map(lambda x, y: tf.reshape(y, (tf.math.reduce_prod(tf.shape(y)), )))
    
    input_vectorization.adapt(inputs)
    target_vectorization.adapt(targets)
    print(input_vectorization.get_vocabulary())
    print(target_vectorization.get_vocabulary())
    
    ['', '[UNK]', '7', '6', '5', '4', '8', '3', '9', '2', '10', '1']
    ['', '[UNK]', '9', '8', '7', '6', '11', '10', '5', '12']
    

    Note that the adapt method simply builds a vocabulary from the inputs and maps each word in the vocabulary to a unique integer value. Also, because of the TextVectorization layer's default parameter standardize='lower_and_strip_punctuation', the minus signs are stripped when adapt is called. If you want to avoid this behavior, you can set, for example, standardize='lower' (see the sketch below).
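
    For illustration, here is a minimal sketch of both points: keeping the minus signs and then actually running the dataset through an adapted layer. The space-joining step with tf.strings.reduce_join is only an assumption about how each window should be turned into a single "sentence"; it is not part of your original pipeline:

    # Re-create the input layer without punctuation stripping so that
    # tokens such as '-5' survive standardization.
    input_vectorization = tf.keras.layers.TextVectorization(
        max_tokens=20,
        output_mode="int",
        output_sequence_length=6,
        standardize="lower",
    )
    input_vectorization.adapt(inputs)
    print(input_vectorization.get_vocabulary())  # now also contains negative tokens such as '-5'

    # Hypothetical usage after adapting: join every window into one
    # space-separated string and map it through the adapted layer
    # (the result is truncated/padded to output_sequence_length=6).
    def vectorize(x, y):
        x_text = tf.strings.reduce_join(
            tf.strings.reduce_join(x, separator=" ", axis=-1),
            separator=" ", axis=-1)
        return input_vectorization(x_text), y

    vectorized_ds = ds.map(vectorize)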