I'm trying to create a customized loss function to use in a neural network algorithm. My loss function:
import keras.backend as kb
import tensorflow as tf
def sign_penalty(y_true, y_pred):
    """Custom Keras loss: squared relative error, with an extra penalty
    term applied element-wise wherever the prediction undershoots the
    target (y_pred / y_true < 1).

    NOTE(review): `tf.unique_with_counts` only accepts a 1-D vector, but
    `y_pred >= y_true` here is the model's 2-D (batch, 1) output — this
    is the source of the reported
    `InvalidArgumentError: unique expects a 1D vector`.
    """
    penalty=0.5
    # tf.where picks per-element between the penalised branch (undershoot)
    # and the plain squared branch.
    # In the penalised branch, index [2] of unique_with_counts is the
    # per-unique-value count vector; dividing by its sum and taking [0]
    # is presumably meant to be the fraction of one of the two boolean
    # values — i.e. the share of y_pred >= y_true elements. TODO confirm
    # the intended semantics with the author; the [0] element's meaning
    # depends on which boolean value unique_with_counts lists first.
    loss=tf.where(tf.less(y_pred/y_true, 1),
                  penalty*(1-tf.dtypes.cast((((tf.unique_with_counts(y_pred >= y_true)[2])/(kb.sum(tf.unique_with_counts(y_pred >= y_true)[2])))[0]),tf.float32))+kb.sum((y_true - y_pred)**2/y_true),
                  kb.square(kb.sum((y_true - y_pred)**2/y_true)))
    return(loss)
The model used:
# Simple MLP: one hidden ReLU layer of 100 units, a single linear output
# unit, compiled with the custom loss above.
# NOTE(review): `Sequential`, `Dense`, `X_train`, and `adam` are assumed to
# be imported/defined elsewhere in the script — not shown in this snippet.
model_mlp = Sequential()
model_mlp.add(Dense(100, activation='relu', input_dim=X_train.shape[1]))
model_mlp.add(Dense(1))
model_mlp.compile(loss=sign_penalty, optimizer=adam)
model_mlp.summary()
When I fit the model:
# Train on the training split while monitoring the held-out validation
# split; `X_valid`, `Y_valid`, and `epochs` are defined elsewhere.
mlp_history = model_mlp.fit(X_train.values, Y_train, validation_data=(X_valid.values, Y_valid), epochs=epochs, verbose=2)
I get the following error:
InvalidArgumentError: unique expects a 1D vector. [[node sign_penalty/UniqueWithCounts (defined at :3) ]] [Op:__inference_train_function_773] Function call stack: train_function
I think that the error comes from the penalty, but I don't know why.
You are passing a multi-dimensional tensor to `unique_with_counts` (the model's output has shape `(batch, 1)`), but that op only accepts a 1-D vector.
Try this simple approach to calculate percentage:
def sign_penalty(y_true, y_pred):
    """Custom Keras loss: squared relative error, with an extra penalty
    term applied element-wise wherever the prediction undershoots the
    target (y_pred / y_true < 1).

    The penalty is weighted by (1 - percentage), where `percentage` is the
    scalar fraction of elements in the batch with y_pred >= y_true — the
    quantity the original `unique_with_counts` expression was trying to
    compute, but without the 1-D restriction of that op.
    """
    penalty = 0.5
    # Fraction of predictions at or above the target, as a scalar in [0, 1].
    # BUG FIX: the previous version used `(y_pred >= y_true)[2]`, which
    # merely selects row 2 of the boolean tensor (it is not a count), and
    # `a / s` produced an element-wise ratio rather than a percentage.
    ge = tf.cast(y_pred >= y_true, tf.float32)
    percentage = kb.sum(ge) / tf.cast(tf.size(ge), tf.float32)
    # Sum of squared errors relative to the target magnitude.
    sq_rel_err = kb.sum((y_true - y_pred) ** 2 / y_true)
    # Element-wise choice: penalised branch for undershoots, squared
    # branch otherwise — same branch structure as the original loss.
    loss = tf.where(tf.less(y_pred / y_true, 1),
                    penalty * (1 - percentage) + sq_rel_err,
                    kb.square(sq_rel_err))
    return loss