I am trying to implement a car license-plate detector.
I managed to find the contours and draw them on the IplImage,
and I dissected the characters of the plate. I stored the dissected images in an array called numbers,
and I am trying to match them against template images from my working directory, stored in an array called digits.
Now, when I call cvMatchTemplate,
I get a type error: Assertion failed (src.type() == dst.type()) in cvResize.
The code I have posted is quite lengthy, but the error occurs when I try to resize.
public static String recognize(IplImage a){
String plate = "";
cvSaveImage("platebig.jpg",a);
CvRect r = new CvRect();
r.x(0);
r.y(0);
r.width(a.width()/2+50);
r.height(a.height()/2+30);
cvSetImageROI(a, r);
IplImage cropped = cvCreateImage(cvGetSize(a), a.depth(), a.nChannels());
cvCopy(a, cropped);
IplImage tmp = cvCreateImage(cvGetSize(cropped), IPL_DEPTH_8U, 1);
cvCvtColor(cropped, tmp, CV_BGR2GRAY);
cvSmooth(tmp, tmp, CV_GAUSSIAN, 11, 11, 0.2f, 0.1f);
cvEqualizeHist(tmp, tmp);
cvThreshold(tmp, tmp, 128, 255, CV_THRESH_BINARY_INV);
cvDilate(tmp,tmp,null,2);
//cvCanny(tmp, tmp, 100, 50, 3);
IplImage [] numbers = new IplImage[7];
int i = 0;
CvMemStorage storage = cvCreateMemStorage(0);
CvSeq contour = new CvSeq(null);
CvMemStorage storage2 = cvCreateMemStorage(0);
CvSeq contour2 = new CvSeq(null);
cvFindContours( tmp, storage, contour, Loader.sizeof(CvContour.class),
CV_RETR_TREE, CV_CHAIN_APPROX_NONE, cvPoint(0, 0) );
int [] sorter = new int[7];
CvSeq contourLow=cvApproxPoly(contour, Loader.sizeof(CvContour.class), storage,CV_POLY_APPROX_DP,1,1);
for( ; contourLow != null; contourLow = contourLow.h_next() ){
CvRect rect;
rect=cvBoundingRect(contourLow);
if(i<7&&rect.width()>30&&rect.height()>30)
{
numbers[i] = IplImage.create(rect.width(),
rect.height(), cropped.depth(), cropped.nChannels());
cvSetImageROI(cropped, rect);
cvCopy(cropped, numbers[i]);
IplImage batata = cvCreateImage(cvGetSize(numbers[i]), IPL_DEPTH_8U, 1);
cvCvtColor(numbers[i], batata, CV_BGR2GRAY);
cvSmooth(batata, batata, CV_GAUSSIAN, 11, 11, 0.2f, 0.1f);
cvEqualizeHist(batata, batata);
cvThreshold(batata, batata, 128, 255, CV_THRESH_BINARY_INV);
cvDilate(batata,batata,null,2);
cvCanny(numbers[i],batata,10,100,3);
cvFindContours( batata, storage2, contour2, Loader.sizeof(CvContour.class),
CV_RETR_TREE, CV_CHAIN_APPROX_NONE, cvPoint(0, 0) );
CvSeq contourLow1=cvApproxPoly(contour2, Loader.sizeof(CvContour.class), storage2,CV_POLY_APPROX_DP,1,1);
for( ; contourLow1 != null; contourLow1 = contourLow1.h_next())
{
CvScalar color = CV_RGB( 255,0,0);
cvSetImageROI(cropped, rect);
cvDrawContours(cropped, contourLow1, color, CV_RGB(255,0,0), 127,1,8);
}
r.x(8);
r.y(62);
r.width(52);
r.height(126-62);
numbers[0] = cvCreateImage(cvSize(r.width(), r.height()), cropped.depth(), cropped.nChannels());
cvSetImageROI(cropped, r);
cvCopy(cropped, numbers[0]);
r.x(79);
r.y(20);
r.width(146-79);
r.height(126-20);
numbers[1] = cvCreateImage(cvSize(r.width(), r.height()), cropped.depth(), cropped.nChannels());
cvSetImageROI(cropped, r);
cvCopy(cropped, numbers[1]);
r.x(161);
r.y(20);
r.width(224-161);
r.height(126-20);
numbers[2] = cvCreateImage(cvSize(r.width(), r.height()), cropped.depth(), cropped.nChannels());
cvSetImageROI(cropped, r);
cvCopy(cropped, numbers[2]);
r.x(237);
r.y(20);
r.width(306-237);
r.height(126-20);
numbers[3] = cvCreateImage(cvSize(r.width(), r.height()), cropped.depth(), cropped.nChannels());
cvSetImageROI(cropped, r);
cvCopy(cropped, numbers[3]);
r.x(316);
r.y(20);
r.width(385-316);
r.height(126-20);
numbers[4] = cvCreateImage(cvSize(r.width(), r.height()), cropped.depth(), cropped.nChannels());
cvSetImageROI(cropped, r);
cvCopy(cropped, numbers[4]);
r.x(395);
r.y(20);
r.width(464-395);
r.height(126-20);
numbers[5] = cvCreateImage(cvSize(r.width(), r.height()), cropped.depth(), cropped.nChannels());
cvSetImageROI(cropped, r);
cvCopy(cropped, numbers[5]);
r.x(470);
r.y(20);
r.width(544-470);
r.height(126-20);
numbers[6] = cvCreateImage(cvSize(r.width(), r.height()), cropped.depth(), cropped.nChannels());
cvSetImageROI(cropped, r);
cvCopy(cropped, numbers[6]);
sorter[i] = rect.x();
i++;
}
}
IplImage [] digits = new IplImage[11];
for(int j = 0;j<10;j++){
String str = j +".jpg";
IplImage temp = cvLoadImage(str,CV_LOAD_IMAGE_COLOR);
digits[j] = IplImage.create(temp.width(),
temp.height(), IPL_DEPTH_32F, 3);
cvConvertScale(temp, digits[j]);
}
digits[10] = cvLoadImage("o.jpg");
for(int k =1;k<7;k++){
double max = 0;
int index = 0;
for(int w = 0;w<10;w++){
IplImage temp = IplImage.create(1,1, IPL_DEPTH_32F, 3);
cvZero(temp);
IplImage res = IplImage.create(digits[w].width(),digits[w].height(), digits[w].depth(), 3);
cvResize(numbers[k], res);
cvMatchTemplate(digits[w], res, temp, CV_TM_CCOEFF);
Using cvConvert
will help convert the image depth and number of channels at the same time. This solves the problem with cvResize(),
because both images must have the same depth and number of channels.
IplImage a = IplImage.create(img.width(), img.height(), img.depth(), img.nchannels());
cvConvert(img,a);