I want to use the BERT language model to train a multi-class text classification task. Previously I trained with an LSTM without any error, but BERT gives me the error below, and I really don't know how to solve it. Can anyone help me, please?
Unfortunately, there is very little documentation on using BERT with the Keras library.
The Error:
TypeError Traceback (most recent call last)
<ipython-input-177-7b203e5e7f55> in <module>()
3
4 model.compile(optimizer = tf.keras.optimizers.Adam(learning_rate= 2e-5),
----> 5 loss = tf.keras.losses.sparse_categorical_crossentropy(from_logits=True),
6 metrics = [tf.keras.metrics.categorical_accuracy()])
7 model.summary
/usr/local/lib/python3.7/dist-packages/tensorflow/python/util/dispatch.py in wrapper(*args, **kwargs)
199 """Call target, and fall back on dispatchers if there is a TypeError."""
200 try:
--> 201 return target(*args, **kwargs)
202 except (TypeError, ValueError):
203 # Note: convert_to_eager_tensor currently raises a ValueError, not a
TypeError: sparse_categorical_crossentropy() missing 2 required positional arguments: 'y_true' and 'y_pred'
Create tf.data.Datasets for Training and Evaluation
# Imports assumed by the snippets below; tokenization and classifier_data_lib
# come from the tf-models-official package:
import tensorflow as tf
import tensorflow_hub as hub
from sklearn.model_selection import train_test_split
from official.nlp.bert import tokenization
from official.nlp.data import classifier_data_lib

X = data.text_cleaned
Y = data.label
train_df, remaining = train_test_split(data, train_size=0.8, random_state=42)
valid_df, _ = train_test_split(remaining, random_state=42)
print(train_df.shape)
print(valid_df.shape)
with tf.device('/cpu:0'):
    train_data = tf.data.Dataset.from_tensor_slices((train_df["text_cleaned"].values,
                                                     train_df["label"].values))
    valid_data = tf.data.Dataset.from_tensor_slices((valid_df["text_cleaned"].values,
                                                     valid_df["label"].values))
    for text, label in train_data.take(2):
        print(text)
        print(label)
Download a Pre-trained BERT Model from TensorFlow Hub
label_list = [0.0, 1.0, 2.0] # Label categories
max_seq_length = 64 # maximum length of (token) input sequences
train_batch_size = 32
# Get BERT layer and tokenizer:
# More details here: https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/2
bert_layer = hub.KerasLayer("https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/2",
trainable= True)
vocab_file = bert_layer.resolved_object.vocab_file.asset_path.numpy()
do_lower_case = bert_layer.resolved_object.do_lower_case.numpy()
tokenizer = tokenization.FullTokenizer(vocab_file, do_lower_case)
tokenizer.wordpiece_tokenizer.tokenize('Hi, how are you doing?')
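As a quick sanity check (an assumed debugging step, not part of the original flow), the full tokenizer can round-trip a sentence into the vocabulary IDs BERT actually consumes. Unlike the bare wordpiece tokenizer above, FullTokenizer.tokenize also lowercases and splits punctuation first:
# Assumed sanity check: basic + wordpiece tokenization, then vocabulary lookup
tokens = tokenizer.tokenize('Hi, how are you doing?')
print(tokens)                                   # e.g. ['hi', ',', 'how', 'are', 'you', 'doing', '?']
print(tokenizer.convert_tokens_to_ids(tokens))  # the integer IDs fed to the model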
Tokenize and Preprocess Text for BERT
# This provides a function to convert a row to input features and a label
def to_feature(text, label, label_list=label_list, max_seq_length=max_seq_length, tokenizer=tokenizer):
    example = classifier_data_lib.InputExample(guid=None,
                                               text_a=text.numpy(),
                                               text_b=None,
                                               label=label.numpy())
    feature = classifier_data_lib.convert_single_example(0, example, label_list, max_seq_length, tokenizer)
    return (feature.input_ids, feature.input_mask, feature.segment_ids, feature.label_id)
Wrap a Python Function into a TensorFlow op for Eager Execution
def to_feature_map(text, label):
    input_ids, input_mask, input_type_ids, label_id = tf.py_function(to_feature,
                                                                     inp=[text, label],
                                                                     Tout=[tf.int32,
                                                                           tf.int32,
                                                                           tf.int32,
                                                                           tf.int32])
    input_ids.set_shape([max_seq_length])
    input_mask.set_shape([max_seq_length])
    input_type_ids.set_shape([max_seq_length])
    label_id.set_shape([])
    x = {
        'input_word_ids': input_ids,
        'input_mask': input_mask,
        'input_type_ids': input_type_ids,
    }
    return (x, label_id)
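Since to_feature_map runs the conversion eagerly through tf.py_function, it can be smoke-tested on a single example before being mapped over the whole dataset. A minimal sketch, assuming the label_list and tokenizer defined above (the sample text and label are made up):
# Hypothetical smoke test on one (text, label) pair
features, label_id = to_feature_map(tf.constant('a short test sentence'), tf.constant(0.0))
print(features['input_word_ids'].shape)  # (64,), i.e. max_seq_length
print(label_id)                          # scalar label index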
Create a TensorFlow Input Pipeline with tf.data
with tf.device('/cpu:0'):
    train_data = (train_data.map(to_feature_map,
                                 num_parallel_calls=tf.data.experimental.AUTOTUNE)
                            .shuffle(1000)
                            .batch(32, drop_remainder=True)
                            .prefetch(tf.data.experimental.AUTOTUNE))
    valid_data = (valid_data.map(to_feature_map,
                                 num_parallel_calls=tf.data.experimental.AUTOTUNE)
                            .shuffle(1000)
                            .batch(32, drop_remainder=True)
                            .prefetch(tf.data.experimental.AUTOTUNE))
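Before compiling, it is worth confirming that the pipeline yields the shapes the model's named inputs expect; a small check that pulls one batch:
for features, labels in train_data.take(1):
    print(features['input_word_ids'].shape)   # (32, 64): batch_size x max_seq_length
    print(features['input_mask'].shape)       # (32, 64)
    print(features['input_type_ids'].shape)   # (32, 64)
    print(labels.shape)                       # (32,)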
Add a Classification Head to the BERT Layer
def create_model():
    input_word_ids = tf.keras.layers.Input(shape=(max_seq_length,), dtype=tf.int32,
                                           name="input_word_ids")
    input_mask = tf.keras.layers.Input(shape=(max_seq_length,), dtype=tf.int32,
                                       name="input_mask")
    input_type_ids = tf.keras.layers.Input(shape=(max_seq_length,), dtype=tf.int32,
                                           name="input_type_ids")

    pooled_output, sequence_output = bert_layer([input_word_ids, input_mask, input_type_ids])
    drop = tf.keras.layers.Dropout(0.5)(pooled_output)
    output = tf.keras.layers.Dense(3, activation='softmax', name="output")(drop)

    model = tf.keras.Model(
        inputs={
            'input_word_ids': input_word_ids,
            'input_mask': input_mask,
            'input_type_ids': input_type_ids},
        outputs=output)
    return model
Fine-Tune BERT for Text Classification
model = create_model()
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=2e-5),
              loss=tf.keras.losses.sparse_categorical_crossentropy(),   # the loss function is called here instead of passed
              metrics=[tf.keras.metrics.categorical_accuracy()])        # same problem: called instead of passed
model.summary   # note: missing () here too
It was solved by using:
model = create_model()
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=2e-5),
              loss=tf.keras.losses.SparseCategoricalCrossentropy(),
              metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
model.summary()
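Why this works: tf.keras.losses.sparse_categorical_crossentropy is a plain function that expects y_true and y_pred, so calling it inside compile() raises the TypeError above; compile() wants either the uncalled function or an instance of the class form, SparseCategoricalCrossentropy. Two related details: the Dense head already applies softmax, so the default from_logits=False is the right setting (the original attempt's from_logits=True would wrongly treat the softmax probabilities as raw logits), and since the labels are integer class IDs while the model outputs probability vectors, SparseCategoricalAccuracy() is the matching metric; plain Accuracy() compares labels to probabilities element-wise and reports near-zero values, which is why it is swapped in above. A minimal fine-tuning sketch from here, with the epoch count chosen arbitrarily:
history = model.fit(train_data,
                    validation_data=valid_data,
                    epochs=3)  # epochs=3 is an arbitrary choice for illustration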