Tags: python, machine-learning, neural-network, nltk, recurrent-neural-network

Generating Shakespearean Text Using a Character RNN


I am reading the ML book Hands-On Machine Learning (2nd edition). On page 526 there is a section called Generating Shakespearean Text Using a Character RNN. I am following it exactly, but at training time it throws a TypeError. I did my best to solve this problem at my level.

TypeError: unsupported operand type(s) for *: 'int' and 'NoneType'

Here is the code:


    import tensorflow as tf
    from tensorflow import keras
    from nltk import tokenize
    import numpy as np
    
    shakespeare_url = "https://homl.info/shakespeare" # shortcut URL
    filepath = keras.utils.get_file("shakespeare.txt", shakespeare_url)
    with open(filepath) as f:
      shakespeare_text = f.read()
    
    tokenizer = keras.preprocessing.text.Tokenizer(char_level=True)
    tokenizer.fit_on_texts([shakespeare_text])
    
    max_id = len(tokenizer.word_index)
    dataset_size = tokenizer.document_count
    [encoded] = np.array(tokenizer.texts_to_sequences([shakespeare_text])) - 1
    print(dataset_size)
      
    train_size = dataset_size * 90 // 100
    dataset = tf.data.Dataset.from_tensor_slices(encoded[:train_size])
    print(train_size)
    
    n_steps = 100
    window_length = n_steps + 1 # target = input shifted 1 character ahead
    dataset = dataset.window(window_length, shift=1, drop_remainder=True)
    dataset = dataset.flat_map(lambda window: window.batch(window_length))
    
    batch_size = 32
    dataset = dataset.shuffle(10000).batch(batch_size)
    dataset = dataset.map(lambda windows: (windows[:, :-1], windows[:, 1:]))
    
    dataset = dataset.map(lambda X_batch, Y_batch: (tf.one_hot(X_batch, depth=max_id), Y_batch))
    
    dataset = dataset.prefetch(1)
    print(dataset)
    
    
    model = keras.models.Sequential([
      keras.layers.GRU(128, return_sequences=True, input_shape=[None, max_id],
                       dropout=0.2, recurrent_dropout=0.2),
      keras.layers.GRU(128, return_sequences=True,
                       dropout=0.2, recurrent_dropout=0.2),
      keras.layers.TimeDistributed(keras.layers.Dense(max_id, activation="softmax"))
    ])
    
    model.compile(loss="sparse_categorical_crossentropy", optimizer="adam")
    history = model.fit(dataset, epochs=20)


Solution

  • The reason for this error is this line:

    tokenizer.fit_on_texts([shakespeare_text])
    

    You put the whole text into a one-element list, so the tokenizer sees a single document and dataset_size (taken from tokenizer.document_count) is 1. As a result, train_size = 1 * 90 // 100 = 0, the training dataset is empty, and that is what surfaces as the TypeError during model.fit.

    You should instead pass the raw string; with char_level=True, the tokenizer then counts each character as a document (see the short demonstration after this fix):

    tokenizer.fit_on_texts(shakespeare_text)
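
Here is a minimal sketch to sanity-check the difference. The sample string and variable names (text, t1, t2) are just for illustration:

    from tensorflow import keras
    
    text = "To be, or not to be"
    
    # One-element list: the tokenizer sees a single document.
    t1 = keras.preprocessing.text.Tokenizer(char_level=True)
    t1.fit_on_texts([text])
    print(t1.document_count)  # 1 -> train_size = 1 * 90 // 100 = 0
    
    # Raw string: each character is counted as a document.
    t2 = keras.preprocessing.text.Tokenizer(char_level=True)
    t2.fit_on_texts(text)
    print(t2.document_count)  # 19, i.e. len(text)

With the full Shakespeare file (roughly 1.1 million characters), dataset_size then equals the character count, train_size becomes about 90% of it, and the windowed dataset passed to model.fit is no longer empty.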