I know there are tf.keras.metrics.Precision(), tf.keras.metrics.TruePositives() and tf.keras.metrics.FalsePositives(). But how can I use the output of these built-in metrics inside a custom metric function?

Here is my working code:
import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as K
import tensorflow_addons as tfa
import autokeras as ak

def f1_loss(y_true, y_pred):  # not coded by me
    # soft confusion-matrix counts per class, computed from probabilities
    tp = K.sum(K.cast(y_true * y_pred, 'float'), axis=0)
    tn = K.sum(K.cast((1 - y_true) * (1 - y_pred), 'float'), axis=0)
    fp = K.sum(K.cast((1 - y_true) * y_pred, 'float'), axis=0)
    fn = K.sum(K.cast(y_true * (1 - y_pred), 'float'), axis=0)
    p = tp / (tp + fp + K.epsilon())
    r = tp / (tp + fn + K.epsilon())
    f1 = 2 * p * r / (p + r + K.epsilon())
    f1 = tf.where(tf.math.is_nan(f1), tf.zeros_like(f1), f1)
    return 1 - K.mean(f1)
length = 100000; WIDTH = 3; HEIGHT = 3; CLASSES = 2
X = np.random.random((length, HEIGHT, WIDTH)).astype(np.float32)
Y_float = np.ones((length, CLASSES)).astype(np.float32)
for i in range(length):
    Y_float[i] = np.array([np.mean(X[i]), np.mean(X[i]) / 2])
Y_binary = (Y_float >= 0.5).astype(np.int32)
input_node = ak.Input()
output_node = ak.DenseBlock()(input_node)
Classification_output = ak.ClassificationHead(
    loss=f1_loss,
    metrics=[tfa.metrics.F1Score(num_classes=2),
             tf.keras.metrics.TruePositives(),
             tf.keras.metrics.FalsePositives()],
    multi_label=True)(output_node)
auto_model = ak.AutoModel(inputs=[input_node], outputs=[Classification_output],
                          max_trials=1, overwrite=True)
ak_history = auto_model.fit(x=[X], y=Y_binary, validation_split=0.2)
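For reference, f1_loss can be sanity-checked eagerly on a toy batch (the values below are made up for illustration and rely on the definitions above):

y_t = tf.constant([[1., 0.], [0., 1.]])
y_p = tf.constant([[0.9, 0.2], [0.3, 0.8]])
print(f1_loss(y_t, y_p).numpy())  # ~0.19, even though every prediction rounds to the correct label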
Searching for the best model and training work really well, despite the fact that f1_loss never equals tfa.metrics.F1Score or 1 - tfa.metrics.F1Score. (That is expected: f1_loss is computed from soft probabilities batch by batch, while F1Score accumulates thresholded counts over the whole epoch.) The real problem is that I need to add a metric that could later be used in the search for the best model.
def diff(y_true, y_pred):  # the new custom metric I would like to add
    d = tf.keras.metrics.TruePositives() - tf.keras.metrics.FalsePositives()
    return d
Now, if I update the metrics to

metrics=[diff, tfa.metrics.F1Score(num_classes=2), tf.keras.metrics.TruePositives(), tf.keras.metrics.FalsePositives()]
I get the error:

TypeError: in user code:

    /opt/conda/lib/python3.7/site-packages/keras/engine/training.py:853 train_function  *
        return step_function(self, iterator)
    /tmp/ipykernel_33/1113116857.py:16 diff  *
        d=tf.keras.metrics.TruePositives()- tf.keras.metrics.FalsePositives()

    TypeError: unsupported operand type(s) for -: 'TruePositives' and 'FalsePositives'
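The subtraction fails because tf.keras.metrics.TruePositives() creates a Metric object, not a number, and Metric objects don't support arithmetic; a metric function has to return a tensor derived from y_true and y_pred. As a minimal stateless sketch (my own illustration, mirroring the counting pattern in f1_loss above; note it resets every batch rather than accumulating), the per-batch difference can be computed directly:

def diff_per_batch(y_true, y_pred):
    # threshold predictions at 0.5, then count tp and fp within the batch
    y_true_f = K.cast(y_true, 'float32')
    y_pred_bin = K.cast(K.greater(y_pred, 0.5), 'float32')
    tp = K.sum(y_true_f * y_pred_bin)
    fp = K.sum((1. - y_true_f) * y_pred_bin)
    return tp - fp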
For counts accumulated across batches, a custom metric found here was adapted to my requirement:
class diff(tf.keras.metrics.Metric):
    def __init__(self, name='diff', **kwargs):
        super().__init__(name=name, **kwargs)
        # running counts, accumulated over all batches of an epoch
        self.tp = self.add_weight('tp', initializer='zeros')
        self.fp = self.add_weight('fp', initializer='zeros')

    def update_state(self, y_true, y_pred, sample_weight=None):
        y_true = tf.cast(y_true, tf.bool)
        y_pred = tf.math.greater(y_pred, 0.5)
        true_p = tf.logical_and(tf.equal(y_true, True), tf.equal(y_pred, True))
        false_p = tf.logical_and(tf.equal(y_true, False), tf.equal(y_pred, True))
        self.tp.assign_add(tf.reduce_sum(tf.cast(true_p, self.dtype)))
        self.fp.assign_add(tf.reduce_sum(tf.cast(false_p, self.dtype)))

    def reset_state(self):
        self.tp.assign(0)
        self.fp.assign(0)

    def result(self):
        return self.tp - self.fp
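To actually steer the architecture search with this metric, an instance goes into the metrics list and the tuning objective can point at it via keras_tuner.Objective. A sketch, reusing the nodes defined above (the objective name 'val_diff' is my assumption, derived from the metric's name attribute prefixed with 'val_'):

import keras_tuner

Classification_output = ak.ClassificationHead(
    loss=f1_loss,
    metrics=[diff(), tfa.metrics.F1Score(num_classes=2)],
    multi_label=True)(output_node)
auto_model = ak.AutoModel(
    inputs=[input_node], outputs=[Classification_output],
    objective=keras_tuner.Objective('val_diff', direction='max'),  # maximize tp - fp on validation data
    max_trials=1, overwrite=True)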