I want to build a predictor from a tf.estimator.Estimator model. Therefore I need to specify an input_receiver_fn that defines the preprocessing graph from the receiver tensors to the features that will be passed to the model_fn by the predictor.
Here is an example of an eval_input_fn for the Estimator:
def eval_input_fn(params):
    ds = tf.data.Dataset.from_generator(
        generator=Eval_Generator(params),
        output_types=(tf.uint16, tf.uint16),
        output_shapes=([3] + params['crop_size'], [2] + params['crop_size']))
    augmentations = [Convert, Downsample, Clip]
    ds = ds.repeat()
    for augmentation in augmentations:
        ds = ds.map(augmentation, num_parallel_calls=params['threads'])
    ds = ds.batch(1).prefetch(None)
    return ds
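(The augmentation functions themselves are not important here; for illustration only, a minimal Convert in the two-argument form, simplified to a plain cast, would look roughly like this:)
import tensorflow as tf

def Convert(features, labels):
    # Simplified, illustrative stand-in for the real Convert: just cast the
    # generator's uint16 tensors to float32 for the downstream augmentations.
    return tf.cast(features, tf.float32), tf.cast(labels, tf.float32)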
I changed the augmentation functions from taking two arguments (features: tf.Tensor, labels: tf.Tensor) to taking only one argument (features: tf.Tensor) and wrote the corresponding input_receiver_fn, which looks like this:
def serving_input_receiver_fn():
    rec_raw = tf.placeholder(tf.float32, [3, 256, 256, 256], name='raw')
    raw = Convert(rec_raw)
    raw = Downsample(raw)
    raw = Clip(raw)
    raw = tf.expand_dims(raw, 0)
    return tf.estimator.export.TensorServingInputReceiver(features=raw, receiver_tensors=rec_raw)
The function returns the following object:
TensorServingInputReceiver(features=<tf.Tensor 'ExpandDims_1:0' shape=(1, 3, 128, 128, 128) dtype=float32>, receiver_tensors={'input': <tf.Tensor 'raw:0' shape=(3, 256, 256, 256) dtype=float32>}, receiver_tensors_alternatives=None)
which seems about right. But when I try to instantiate the predictor with:
config = tf.estimator.RunConfig(model_dir=params['model_dir'])
estimator = tf.estimator.Estimator(model_fn=model_fn, params=params, config=config)
predict_fn = tf.contrib.predictor.from_estimator(estimator, serving_input_receiver_fn)
I get the following error message:
INFO:tensorflow:Calling model_fn.
Traceback (most recent call last):
File "/home/jrumber/anaconda3/envs/tf1.12_gpuenv/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py", line 510, in _apply_op_helper
preferred_dtype=default_dtype)
File "/home/jrumber/anaconda3/envs/tf1.12_gpuenv/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 1146, in internal_convert_to_tensor
ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
File "/home/jrumber/anaconda3/envs/tf1.12_gpuenv/lib/python3.6/site-packages/tensorflow/python/framework/constant_op.py", line 229, in _constant_tensor_conversion_function
return constant(v, dtype=dtype, name=name)
File "/home/jrumber/anaconda3/envs/tf1.12_gpuenv/lib/python3.6/site-packages/tensorflow/python/framework/constant_op.py", line 208, in constant
value, dtype=dtype, shape=shape, verify_shape=verify_shape))
File "/home/jrumber/anaconda3/envs/tf1.12_gpuenv/lib/python3.6/site-packages/tensorflow/python/framework/tensor_util.py", line 430, in make_tensor_proto
raise ValueError("None values not supported.")
ValueError: None values not supported.
File "/home/jrumber/anaconda3/envs/tf1.12_gpuenv/lib/python3.6/site-packages/tensorflow/contrib/predictor/predictor_factories.py", line 105, in from_estimator
config=config)
File "/home/jrumber/anaconda3/envs/tf1.12_gpuenv/lib/python3.6/site-packages/tensorflow/contrib/predictor/core_estimator_predictor.py", line 72, in __init__
serving_input_receiver, estimator, output_key)
File "/home/jrumber/anaconda3/envs/tf1.12_gpuenv/lib/python3.6/site-packages/tensorflow/contrib/predictor/core_estimator_predictor.py", line 37, in _get_signature_def
estimator.config)
File "/home/jrumber/anaconda3/envs/tf1.12_gpuenv/lib/python3.6/site-packages/tensorflow/python/estimator/estimator.py", line 235, in public_model_fn
return self._call_model_fn(features, labels, mode, config)
File "/home/jrumber/anaconda3/envs/tf1.12_gpuenv/lib/python3.6/site-packages/tensorflow/python/estimator/estimator.py", line 1195, in _call_model_fn
model_fn_results = self._model_fn(features=features, **kwargs)
File "/fast/AG_Kainmueller/jrumber/flylight_01/train_tf.py", line 227, in model_fn
gt,fg = tf.unstack(labels,num=2,axis=1)
File "/home/jrumber/anaconda3/envs/tf1.12_gpuenv/lib/python3.6/site-packages/tensorflow/python/ops/array_ops.py", line 1027, in unstack
return gen_array_ops.unpack(value, num=num, axis=axis, name=name)
File "/home/jrumber/anaconda3/envs/tf1.12_gpuenv/lib/python3.6/site-packages/tensorflow/python/ops/gen_array_ops.py", line 9429, in unpack
"Unpack", value=value, num=num, axis=axis, name=name)
File "/home/jrumber/anaconda3/envs/tf1.12_gpuenv/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py", line 528, in _apply_op_helper
(input_name, err))
ValueError: Tried to convert 'value' to a tensor and failed. Error: None values not supported.
Since it could be a problem with my model_fn, I'll post it too:
def model_fn(features, labels, mode, params):
    gt, fg = tf.unstack(labels, num=2, axis=1)
    gt.set_shape([1] + params['input_size'])
    fg.set_shape([1] + params['input_size'])
    features.set_shape([1, 3] + params['input_size'])
    # first layer to set input_shape
    features = tf.keras.layers.Conv3D(
        input_shape=tuple([3] + params['input_size']),
        data_format='channels_first',
        filters=params['chan'],
        kernel_size=[3, 3, 3],
        strides=(1, 1, 1),
        padding='same',
        activation='relu',
        kernel_regularizer=tf.keras.regularizers.l2(l=0.01))(features)
    # U-Net
    out = unet(features, params['unet_initial_filters'], params['width_factor'], params['architecture'])
    # Embedding conv pass
    output_batched = conv_pass(
        out,
        kernel_size=1,
        num_fmaps=params['chan'],
        num_repetitions=1,
        activation=None,
        name='conv_embedding')
    output = tf.squeeze(output_batched)
    # Fg/Bg segmentation conv pass
    mask_batched = conv_pass(
        out,
        kernel_size=1,
        num_fmaps=1,
        num_repetitions=1,
        activation='sigmoid',
        name='conv_mask')
    prob_mask = tf.squeeze(mask_batched)
    logits_mask = logit(prob_mask)
    # store predictions in dict
    predictions = {
        'prob_mask': tf.expand_dims(prob_mask, 0),
        'embedding': output,
        'gt': tf.squeeze(gt, 0)}
    # TRAIN mode
    if mode == tf.contrib.learn.ModeKeys.TRAIN:
        loss, l_var, l_dist, l_reg = discriminative_loss_single(
            prediction=output,
            correct_label=tf.squeeze(gt),
            feature_dim=params['chan'],
            delta_v=params['delta_v'],
            delta_d=params['delta_d'],
            param_var=params['param_var'],
            param_dist=params['param_dist'],
            param_reg=params['param_reg'])
        mask_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            labels=tf.squeeze(fg),
            logits=logits_mask))
        reg_loss = tf.losses.get_regularization_loss() * 1e-6
        loss += mask_loss + reg_loss
        opt = tf.train.AdamOptimizer(
            learning_rate=0.5e-4,
            beta1=0.95,
            beta2=0.999,
            epsilon=1e-8)
        optimizer = opt.minimize(loss, global_step=tf.train.get_global_step())
        global_step = tf.Variable(1, name='global_step', trainable=False, dtype=tf.int32)
        increment_global_step_op = tf.assign(global_step, global_step + 1)
        logging_hook = tf.train.LoggingTensorHook({"loss": loss, 'global_step': increment_global_step_op}, every_n_iter=1)
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions, loss=loss, train_op=optimizer, training_hooks=[logging_hook])
    # PREDICT mode
    if mode == tf.estimator.ModeKeys.PREDICT:
        export_outputs = {
            'predict_output': tf.estimator.export.PredictOutput(predictions)
        }
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions, export_outputs=export_outputs)
    # EVAL mode
    if mode == tf.estimator.ModeKeys.EVAL:
        export_outputs = {
            'eval_output': tf.estimator.export.EvalOutput(predictions)
        }
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions, export_outputs=export_outputs)
Does anybody spot my mistake here? Best :)
The error was in the model_fn. The following lines have to be moved down into the # TRAIN mode part of the function:
gt, fg = tf.unstack(labels, num=2, axis=1)
gt.set_shape([1] + params['input_size'])
fg.set_shape([1] + params['input_size'])
Estimator.predict feeds only the features and passes None for the labels, so tf.unstack throws an exception; all operations that work on the labels therefore have to be moved into the # TRAIN mode part of the model_fn (see the sketch below).
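A minimal sketch of the restructured model_fn, with the real network (Conv3D/unet/conv_pass) and the discriminative loss collapsed into a toy conv3d head and a placeholder MSE loss, just to show where the label-dependent ops belong:
import tensorflow as tf

def model_fn(features, labels, mode, params):
    features.set_shape([1, 3] + params['input_size'])
    # Toy stand-in for the real Conv3D/unet/conv_pass stack.
    logits = tf.layers.conv3d(features, filters=1, kernel_size=1,
                              data_format='channels_first', name='toy_head')
    predictions = {'embedding': logits}  # no label-derived entries in here

    # Handle PREDICT before touching `labels`: Estimator.predict and the
    # serving graph call model_fn with labels=None.
    if mode == tf.estimator.ModeKeys.PREDICT:
        export_outputs = {
            'predict_output': tf.estimator.export.PredictOutput(predictions)}
        return tf.estimator.EstimatorSpec(
            mode=mode, predictions=predictions, export_outputs=export_outputs)

    # Only TRAIN/EVAL reach this point, so labels is a real tensor here.
    gt, fg = tf.unstack(labels, num=2, axis=1)
    gt.set_shape([1] + params['input_size'])
    fg.set_shape([1] + params['input_size'])
    # Placeholder loss standing in for the discriminative + mask losses.
    loss = tf.losses.mean_squared_error(
        tf.cast(tf.expand_dims(gt, 1), tf.float32), logits)

    if mode == tf.estimator.ModeKeys.TRAIN:
        train_op = tf.train.AdamOptimizer(1e-4).minimize(
            loss, global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(
            mode=mode, predictions=predictions, loss=loss, train_op=train_op)

    # EVAL
    return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions, loss=loss)
Note that the 'gt' entry in the original predictions dict is also derived from labels, so it has to be dropped from (or guarded in) the PREDICT path as well.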