I made a model that predicts a character on an image, to do license plate recognition. It works very well on my computer, but I need to put this work into an Android app. So I developed a small application and converted my Keras model to TFLite. And now it always predicts the same character.
I converted the model using :
mod_path = "License_character_recognition.h5"

def load_model(path, custom_objects=None, verbose=0):
    """Rebuild a Keras model from a JSON architecture file plus H5 weights.

    Args:
        path: path to the weights file; its extension is replaced with .h5.
        custom_objects: optional dict of custom layers/objects needed to
            deserialize the architecture (defaults to none).
        verbose: if truthy, print where the model was loaded from.

    Returns:
        The reconstructed tf.keras model with weights loaded.
    """
    # Fix: the original used a mutable default argument (custom_objects={}),
    # which is shared across calls; use a None sentinel instead.
    if custom_objects is None:
        custom_objects = {}
    path = splitext(path)[0]
    # NOTE: the architecture filename is hard-coded; only the weights path varies.
    with open('MobileNets_character_recognition.json', 'r') as json_file:
        model_json = json_file.read()
    model = tf.keras.models.model_from_json(model_json, custom_objects=custom_objects)
    model.load_weights('%s.h5' % path)
    if verbose:
        print('Loaded from %s' % path)
    return model

keras_mod = load_model(mod_path)

# Convert the in-memory Keras model straight to a TFLite flatbuffer.
converter = tf.lite.TFLiteConverter.from_keras_model(keras_mod)
tflite_model = converter.convert()

# Save the TF Lite model.
with tf.io.gfile.GFile('ocr.tflite', 'wb') as f:
    f.write(tflite_model)
Is there a better way to convert the model, or am I missing something?
EDIT: This is how I handle the bitmap:
try {
    // Load the plate image twice: a color copy for drawing debug rectangles,
    // and a grayscale copy for segmentation.
    Mat bis = Utils.loadResource(MainActivity.this, R.drawable.plaque, Imgcodecs.IMREAD_COLOR);
    cvtColor(bis, bis, COLOR_BGR2RGB);
    Mat m = Utils.loadResource(MainActivity.this, R.drawable.plaque, Imgcodecs.IMREAD_GRAYSCALE);

    // Binarize: blur then inverted Otsu threshold, so dark characters
    // become white blobs that getContours can pick up.
    blur(m, blur, new Size(2, 2));
    threshold(blur, bin, 0, 255, THRESH_BINARY_INV + THRESH_OTSU);

    ArrayList<MatOfPoint> contours;
    contours = getContours(bin);
    // Sort left-to-right so characters are classified in reading order.
    Collections.sort(contours, new SortByTopLeft());
    Log.d("Contour", String.valueOf(contours.size()));

    int i = 0;
    for (MatOfPoint c : contours) {
        Rect cont = boundingRect(c);
        // BUG FIX: the original computed (float)(cont.height/cont.width) —
        // integer division happens BEFORE the cast, so ratio was always a
        // whole number and the 1..2.5 filter was effectively broken.
        // Cast each operand first to get the true aspect ratio.
        float ratio = (float) cont.height / (float) cont.width;
        Log.d("Ratio", String.valueOf(ratio));
        // Fraction of the plate height this contour covers.
        float pourcent = ((float) cont.height / (float) bin.height());
        Log.d("pourcent", String.valueOf(pourcent));

        // Keep only character-shaped contours: taller than wide (up to 2.5x)
        // and spanning at least half of the binarized image's height.
        if (ratio >= 1 && ratio <= 2.5) {
            if (pourcent >= 0.5) {
                Log.d("Ui", String.valueOf(cont));
                rectangle(bis, cont, new Scalar(0, 255, 0), 2);

                // Crop the character from the binary image and classify it.
                Mat curr_num = new Mat(bin, cont);
                Bitmap curbit = Bitmap.createBitmap(curr_num.cols(), curr_num.rows(), Bitmap.Config.ARGB_8888);
                Utils.matToBitmap(curr_num, curbit);
                images[i].setImageBitmap(curbit);
                int charac = classifier.classify(curbit);
                Log.d("Result", String.valueOf(charac));
                // NOTE(review): setText overwrites the previous character on
                // every iteration — to show the whole plate, append instead.
                result.setText(String.valueOf(charac));
                if (i < 6) {
                    i++;
                }
            }
        }
You can use TensorFlow Lite Android Support Library. This library is designed to help process the input and output of TensorFlow Lite models, and make the TensorFlow Lite interpreter easier to use.
Use it as shown below; see the linked article for more details:
// Load the test image from assets and run it through the TFLite interpreter
// using the TensorFlow Lite Support Library helpers.
Bitmap assetsBitmap = getBitmapFromAsset(mContext, "picture.jpg");
// Initialization code
// Create an ImageProcessor with all ops required. For more ops, please
// refer to the ImageProcessor Architecture.
ImageProcessor imageProcessor =
new ImageProcessor.Builder()
.add(new ResizeOp(32, 32, ResizeOp.ResizeMethod.BILINEAR))
// NOTE(review): normalization is disabled here — if the Keras model was
// trained on inputs scaled to [0, 1] or [-1, 1], feeding raw 0-255 pixel
// values can make the model always predict the same class. Confirm the
// training preprocessing and re-enable a matching NormalizeOp.
//.add(new NormalizeOp(127.5f, 127.5f))
.build();
// Create a TensorImage object. This creates the tensor of the corresponding
// tensor type (float32 in this case) that the TensorFlow Lite interpreter needs.
TensorImage tImage = new TensorImage(DataType.FLOAT32);
// Analysis code for every frame
// Preprocess the image: resize to the 32x32 input the processor was built for.
tImage.load(assetsBitmap);
tImage = imageProcessor.process(tImage);
// Create a container for the result and specify that this is not a quantized model.
// Hence, the 'DataType' is defined as FLOAT32. The {1, 10} shape assumes a
// 10-class output — TODO confirm against the model's final layer.
TensorBuffer probabilityBuffer = TensorBuffer.createFixedSize(new int[]{1, 10}, DataType.FLOAT32);
interpreter.run(tImage.getBuffer(), probabilityBuffer.getBuffer());
Log.i("RESULT", Arrays.toString(probabilityBuffer.getFloatArray()));
return getSortedResult(result);
}