Suppose I create simplest model in Keras:
from keras.layers import *
from keras import Input, Model
import coremltools
def MyModel(inputs_shape=(None, None, 3), channels=64):
    """Build a minimal two-layer fully convolutional Keras model.

    Args:
        inputs_shape: Input tensor shape (H, W, C); None allows variable H/W.
        channels: Number of filters in the intermediate convolution.

    Returns:
        A Keras Model mapping an RGB image to a 3-channel tanh output.
    """
    # NOTE(review): the pasted original had the body at column 0, which is a
    # SyntaxError — indentation restored here.
    inpt = Input(shape=inputs_shape)
    # channels
    skip = Conv2D(channels, (3, 3), strides=1, activation=None,
                  padding='same', name='conv_in')(inpt)
    # tanh keeps outputs in [-1, 1], matching the [-1, 1] input scaling used later.
    out = Conv2D(3, (3, 3), strides=1, padding='same',
                 activation='tanh', name='out')(skip)
    return Model(inputs=inpt, outputs=out)
model = MyModel()
# Convert the Keras model to a Core ML spec. Because the Keras input shape is
# (None, None, 3), the converter needs concrete dims via input_name_shape_dict.
coreml_model = coremltools.converters.keras.convert(model,
input_names=["inp1"],
output_names=["out1"],
image_scale=1.0,
model_precision='float32',
use_float_arraytype=True,
input_name_shape_dict={'inp1': [None, 384, 384, 3]}
)
# Inspect the raw protobuf spec to see what shapes the converter recorded.
spec = coreml_model._spec
print(spec.description.input[0])
print(spec.description.input[0].type.multiArrayType.shape)
print(spec.description.output[0])
coremltools.utils.save_spec(spec, "test.mlmodel")
The output is:
2 : out, <keras.layers.convolutional.Conv2D object at 0x7f08ca491470>
3 : out__activation__, <keras.layers.core.Activation object at 0x7f08ca4b0b70>
name: "inp1"
type {
multiArrayType {
shape: 3
shape: 384
shape: 384
dataType: FLOAT32
}
}
[3, 384, 384]
name: "out1"
type {
multiArrayType {
shape: 3
dataType: FLOAT32
}
}
So the output shape is just 3, which is incorrect. And when I try to get rid of input_name_shape_dict
I get:
Please provide a finite height (H), width (W) & channel value (C) using input_name_shape_dict arg with key = 'inp1' and value = [None, H, W, C]
Converted .mlmodel can be modified to have flexible input shape using coremltools.models.neural_network.flexible_shape_utils
So it wants NHWC.
Attempt of inference yields:
Layer 'conv_in' of type 'Convolution' has input rank 3 but expects rank at least 4
When I attempt to add extra dimension to input:
# protobuf repeated fields cannot be assigned wholesale, so append the new
# rank-4 shape [1, 3, 384, 384] first, then delete the original three entries
# ([3, 384, 384]) one by one from the front.
spec.description.input[0].type.multiArrayType.shape.extend([1, 3, 384, 384])
del spec.description.input[0].type.multiArrayType.shape[0]
del spec.description.input[0].type.multiArrayType.shape[0]
del spec.description.input[0].type.multiArrayType.shape[0]
[name: "inp1"
type {
multiArrayType {
shape: 1
shape: 3
shape: 384
shape: 384
dataType: FLOAT32
}
}
]
I get for inference:
Shape (1 x 384 x 384 x 3) was not in enumerated set of allowed shapes
Following this advice and making the input shape (1,1,384,384,3)
does not help.
How can I make it working and producing correct output?
Inference:
from PIL import Image  # fixed: "From" (capital F) is a SyntaxError in Python
import numpy as np     # np was used below but never imported

model_cml = coremltools.models.MLModel('my.mlmodel')
# Load the image, add a batch axis, and rescale pixels from [0, 255] to
# [-1, 1] to match the network's tanh output range.
img = np.array(Image.open('patch4.png').convert('RGB'))[np.newaxis, ...] / 127.5 - 1
# Make predictions
predictions = model_cml.predict({'inp1': img})
# Save the result: undo the [-1, 1] scaling and clip to valid 8-bit values.
res = predictions['out1']
res = np.clip((res[0] + 1) * 127.5, 0, 255).astype(np.uint8)
Image.fromarray(res).save('out32.png')
UPDATE:
I am able to run this model with inputs (3,1,384,384)
, the result produces is (1,3,3,384,384)
which does not make any sense to me.
UPDATE 2:
setting fixed shape in Keras
def MyModel(inputs_shape=(384,384,3), channels=64):
inpt = Input(shape=inputs_shape)
fixed output shape problem, but I still cannot run the model (Layer 'conv_in' of type 'Convolution' has input rank 3 but expects rank at least 4
)
UPDATE:
The following works to get rid of input and conv_in shapes mismatch.
1). Downgrade to coremltools==3.0
. Version 3.3 (model version 4) seems broken.
2). Use a fixed shape in the Keras model, no input_name_shape_dict, and a variable shape for the Core ML model
from keras.layers import *
from keras import Input, Model
import coremltools
def MyModel(inputs_shape=(384, 384, 3), channels=64):
    """Build a minimal two-layer fully convolutional Keras model with a fixed
    input shape, which the coremltools 3.0 Keras converter handles correctly.

    Args:
        inputs_shape: Fixed input tensor shape (H, W, C).
        channels: Number of filters in the intermediate convolution.

    Returns:
        A Keras Model mapping an RGB image to a 3-channel tanh output.
    """
    # NOTE(review): the pasted original had the body at column 0, which is a
    # SyntaxError — indentation restored here.
    inpt = Input(shape=inputs_shape)
    # channels
    skip = Conv2D(channels, (3, 3), strides=1, activation=None,
                  padding='same', name='conv_in')(inpt)
    out = Conv2D(3, (3, 3), strides=1, padding='same',
                 activation='tanh', name='out')(skip)
    return Model(inputs=inpt, outputs=out)
model = MyModel()
model.save('test.model')
print(model.summary())
# The triple-quoted string below is the failing coremltools 3.3 attempt,
# kept (disabled) for reference.
'''
# v.3.3
coreml_model = coremltools.converters.keras.convert(model,
input_names=["image"],
output_names="out1",
image_scale=1.0,
model_precision='float32',
use_float_arraytype=True,
input_name_shape_dict={'inp1': [None, 384, 384, 3]}
)
'''
# Working conversion (coremltools 3.0): rely on the fixed Keras input shape,
# no input_name_shape_dict needed.
coreml_model = coremltools.converters.keras.convert(model,
input_names=["image"],
output_names="out1",
image_scale=1.0,
model_precision='float32',
)
spec = coreml_model._spec
# After converting with a fixed shape, re-enable flexible H/W on the Core ML
# side via a shape range (channels fixed at 3, H and W from 64 to 384).
from coremltools.models.neural_network import flexible_shape_utils
shape_range = flexible_shape_utils.NeuralNetworkMultiArrayShapeRange()
shape_range.add_channel_range((3,3))
shape_range.add_height_range((64, 384))
shape_range.add_width_range((64, 384))
flexible_shape_utils.update_multiarray_shape_range(spec, feature_name='image', shape_range=shape_range)
print(spec.description.input)
print(spec.description.input[0].type.multiArrayType.shape)
print(spec.description.output)
coremltools.utils.save_spec(spec, "my.mlmodel")
In the inference script, feed an array of shape (1,1,3,384,384):
# Batch/sequence axes first, then CHW — the rank-5 layout Core ML expects.
img = np.zeros((1, 1, 3, 384, 384))
# Make predictions. The model was converted with input_names=["image"] and the
# flexible-shape range was attached to feature_name='image', so the dict key
# must be 'image' — the original 'inp1' no longer matches any input.
predictions = model_cml.predict({'image': img})
res = predictions['out1']  # (3, 384, 384)
You can ignore what the mlmodel file has in the output shape if it is incorrect. This is more of a metadata issue, i.e. the model will still work fine and do the right thing. The converter isn't always able to figure out the correct output shape (not sure why).