I would like to create a custom loss function for a tensorflow model using y_true and y_pred but I got the following error: ValueError: Cannot infer num from shape (None, 1) this is my custom metric:
def custom_metric(y_true,y_pred):
y_true = float(y_true)
y_pred = float(y_pred)
y_true = tf.unstack(y_true)
y_pred = tf.unstack(y_pred)
sqr_pred_error = K.square(y_true - y_pred)
sqr_y_true = K.square(y_true)
r = []
for i in y_true:
if sqr_pred_error[i] < sqr_y_true[i] or sqr_pred_error[i] == sqr_y_true[i]:
result = 1
print("result: 1")
else:
result = 0
print("result: 0")
r.append(result)
r = tf.stack(r)
return K.sum(r)/K.shape(r)
You probably don't need a loop in there. Looks like you just need a bunch of 0s and 1s.
1
- If sqr_pred_error <= sqr_y_true
0
- elseThen you can do the following.
def custom_metric(y_true,y_pred):
y_true = tf.cast(y_true, 'float32')
y_pred = tf.cast(y_pred, 'float32')
sqr_pred_error = K.square(y_true - y_pred)
sqr_y_true = K.square(y_true)
res = tf.where(sqr_pred_error<=sqr_y_true, tf.ones_like(y_true), tf.zeros_like(y_true))
return K.mean(res)