I'm trying to run predictions on new text examples, and I want the model to return a probability for each example.
This is my learning model:
with tf.name_scope('Placeholders'):
    input_x = tf.placeholder(tf.int32, [None, sequence_length], name='input_x')
    input_y = tf.placeholder(tf.float32, [None, n_classes], name='input_y')
    drops = tf.placeholder(tf.float32, name='dropout_keep_prob')

with tf.name_scope('Embedding_layer'):
    embeddings_v = tf.Variable(tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
    embeddings = tf.nn.embedding_lookup(params=embeddings_v, ids=input_x)

# Bi-directional LSTM
with tf.name_scope('Bi_directional_LSTM'):
    rnn_outputs, _ = bi_rnn(LSTMCell(hidden_unit), LSTMCell(hidden_unit), inputs=embeddings, dtype=tf.float32)
    tf.summary.histogram('Bi_directional_LSTM', rnn_outputs)

# Attention layer
with tf.name_scope('Attention_layer'):
    attention_output, alphas = attention(rnn_outputs, attention_size, return_alphas=True)
    tf.summary.histogram('alphas', alphas)

with tf.name_scope('Dropout'):
    drop = tf.nn.dropout(attention_output, drops)

with tf.name_scope('Fully_connected_layer'):
    W = tf.Variable(tf.truncated_normal([hidden_unit * 2, n_classes], stddev=0.1))
    b = tf.Variable(tf.constant(0., shape=[n_classes]))
    y_hat = tf.nn.xw_plus_b(drop, W, b)
    predictions = tf.argmax(input=y_hat, axis=1, name='predictions')

with tf.name_scope('Loss'):
    loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=y_hat, labels=input_y))
    global_step = tf.Variable(0, name='global_step', trainable=False)
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss, global_step=global_step)
    probs = tf.nn.sigmoid(y_hat)
    tf.summary.scalar('loss', loss)

with tf.name_scope('Accuracy'):
    correct_pred = tf.equal(tf.argmax(y_hat, 1), tf.argmax(input_y, 1))
    accuracy = tf.reduce_mean(input_tensor=tf.cast(correct_pred, tf.float32), name='accuracy')
    num_correct = tf.reduce_sum(input_tensor=tf.cast(correct_pred, tf.float32), name='correct_predictions')
    tf.summary.scalar('accuracy', accuracy)

merged = tf.summary.merge_all()
I then run the following piece of code to train the model:
def batch_generator(X, y, batch_size):
    """Yield shuffled (X, y) batches indefinitely."""
    size = X.shape[0]
    X_copy = X.copy()
    y_copy = y.copy()
    indices = np.arange(size)
    np.random.shuffle(indices)
    X_copy = X_copy[indices]
    y_copy = y_copy[indices]
    i = 0
    while True:
        if i + batch_size <= size:
            yield X_copy[i:i + batch_size], y_copy[i:i + batch_size]
            i += batch_size
        else:
            # Reshuffle and start over once the data is exhausted.
            i = 0
            indices = np.arange(size)
            np.random.shuffle(indices)
            X_copy = X_copy[indices]
            y_copy = y_copy[indices]
            continue

train_batch_generator = batch_generator(x_train, y_train, batch_size)
test_batch_generator = batch_generator(x_dev, y_dev, batch_size)
predict_generator = batch_generator(x_test, y_test, batch_size)

saver = tf.train.Saver()

with tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))) as sess:
    sess.run(tf.global_variables_initializer())
    print("Start learning...")
    for epoch in range(epochs):
        loss_train = 0
        loss_val = 0
        loss_test = 0
        accuracy_train = 0
        accuracy_val = 0
        accuracy_test = 0
        train_loss_l = []
        val_loss_l = []
        print("epoch: {}\t".format(epoch), end="")

        # Training
        num_batches = x_train.shape[0] // batch_size
        for b in tqdm(range(num_batches)):
            x_batch, y_batch = next(train_batch_generator)
            loss_tr, acc, _, summary = sess.run([loss, accuracy, optimizer, merged],
                                                feed_dict={input_x: x_batch,
                                                           input_y: y_batch,
                                                           drops: 0.5})
            train_loss_l.append(loss_tr)
            accuracy_train += acc
            loss_train = loss_tr * DELTA + loss_train * (1 - DELTA)
        accuracy_train /= num_batches

        # Validation
        num_batches = x_dev.shape[0] // batch_size
        for b in tqdm(range(num_batches)):
            x_batch, y_batch = next(test_batch_generator)
            val_loss, val_acc, summary = sess.run([loss, accuracy, merged],
                                                  feed_dict={input_x: x_batch,
                                                             input_y: y_batch,
                                                             drops: 0.5})
            val_loss_l.append(val_loss)
            accuracy_val += val_acc
            loss_val += val_loss
        accuracy_val /= num_batches
        loss_val /= num_batches

        print("loss: {:.3f}, val_loss: {:.3f}, acc: {:.3f}, val_acc: {:.3f}".format(
            loss_train, loss_val, accuracy_train, accuracy_val))

    # Predict on x_test
    num_batches = x_test.shape[0] // batch_size
    print("n batches", num_batches)
    predict_correct = 0
    for batch in tqdm(range(num_batches)):
        x_batch, yx_batch = next(predict_generator)
        y_true = np.argmax(yx_batch, 1)
        loss_pred, acc_pred, n_correct, y_pred = sess.run([loss, accuracy, num_correct, predictions],
                                                          feed_dict={input_x: x_batch,
                                                                     input_y: yx_batch,
                                                                     drops: 0.5})
        print("Precision", sk.metrics.precision_score(y_true, y_pred, average='weighted'))
        print("Recall", sk.metrics.recall_score(y_true, y_pred, average='weighted'))
        print("f1_score", sk.metrics.f1_score(y_true, y_pred, average='weighted'))
        print("confusion_matrix")
        print(sk.metrics.confusion_matrix(y_true, y_pred))

    saver.save(sess, MODEL_PATH)
    sess.run(predictions, feed_dict={x: x_test})
The code runs fine until it hits the prediction part, where I get the following error:
TypeError: unhashable type: 'numpy.ndarray'
Any chance someone could explain this to me please?
Just answered my own question...
Posting this in case someone else runs into the same thing: the keys in feed_dict have to be the placeholder tensors you defined in the graph, referenced by the same names you used when you built it, not the NumPy arrays you are feeding in.
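For context, the TypeError happens because Python tries to use the NumPy array itself as a dictionary key, and arrays are not hashable. A tiny standalone illustration (nothing here depends on the model above):

import numpy as np

arr = np.zeros((2, 3))
d = {arr: 1.0}  # raises TypeError: unhashable type: 'numpy.ndarray'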
In my case this would be:
print(sess.run(predictions, feed_dict={input_x: x_test, drops: 0.5}))
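And since the original goal was a probability per example, the same fix works for the probs op already defined in the graph. A rough sketch, assuming the graph and session above are still in scope; note that for inference you would normally feed a keep probability of 1.0 so dropout is disabled:

# probs = tf.nn.sigmoid(y_hat) was defined in the Loss scope above.
probabilities = sess.run(probs, feed_dict={input_x: x_test, drops: 1.0})
print(probabilities.shape)  # (num_examples, n_classes), one sigmoid score per class
print(probabilities[:5])    # scores for the first five examples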