Tags: c#, computer-vision, face-detection, emgucv

How do I detect a person who is not in the database, and prompt an error if no face is detected inside the image box?


Here is the code for the examination page. I load the faces straight from my folder to recognize who the candidate is, and there is no problem recognizing anyone who has been enrolled in the system, but I have no idea how to detect that the person in front of the camera is not the candidate.

My program goes like this: Login Form, Enroll Form and Exam Form (using Emgu CV in C#). I log in with Kelvin's username and password (Kelvin is already enrolled in the system) and then go to the Exam Form. Once the exam starts, the camera turns on and the recognizer correctly labels Kelvin, but if a third party sits in front of the camera (whether enrolled or not), the system still carries on as if nothing is wrong. I would like it so that if the person is not Kelvin, the system pauses or prompts a message saying "wrong candidate detection, click OK to detect again". Sorry for the broken English; I appreciate your help.

void FrameGrabber(object sender, EventArgs e)
    {
        label3.Text = "0";
        //label4.Text = "";
        NamePersons.Add("");


        //Get the current frame from the capture device
        currentFrame = grabber.QueryFrame().Resize(240, 240, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);

        //Convert it to Grayscale
        gray = currentFrame.Convert<Gray, Byte>();

        //Face Detector
        MCvAvgComp[][] facesDetected = gray.DetectHaarCascade(
      face,
      1.2,
      10,
      Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
      new Size(20, 20));

        //Action for each element detected
        foreach (MCvAvgComp f in facesDetected[0])
        {
            t = t + 1;
            result = currentFrame.Copy(f.rect).Convert<Gray, byte>().Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
            //Draw a light green rectangle around the detected face
            currentFrame.Draw(f.rect, new Bgr(Color.LightGreen), 2);


            if (trainingImages.ToArray().Length != 0)
            {
                //TermCriteria for face recognition; the number of trained images is used as maxIteration
                MCvTermCriteria termCrit = new MCvTermCriteria(ContTrain, 0.001);

                //Eigen face recognizer
                EigenObjectRecognizer recognizer = new EigenObjectRecognizer(
                   trainingImages.ToArray(),
                   labels.ToArray(),
                   3000,
                   ref termCrit);

                name = recognizer.Recognize(result);

                //Draw the label for each face detected and recognized
                currentFrame.Draw(name, ref font, new Point(f.rect.X - 2, f.rect.Y - 2), new Bgr(Color.LightGreen));

            }

            NamePersons[t - 1] = name;
            NamePersons.Add("");


            //Set the number of faces detected on the scene
            label3.Text = facesDetected[0].Length.ToString();



        }

        t = 0;

This is the enrollment form:

    private void button2_Click(object sender, System.EventArgs e)
    {
        try
        {
            //Trained face counter
            ContTrain = ContTrain + 1;

            //Get a gray frame from the capture device at the same size as currentFrame,
            //so that the detected rectangles map onto it correctly
            gray = grabber.QueryGrayFrame().Resize(320, 240, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);

            //Face Detector
            MCvAvgComp[][] facesDetected = gray.DetectHaarCascade(
            face,
            1.2,
            10,
            Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
            new Size(20, 20));

            //Action for each element detected
            foreach (MCvAvgComp f in facesDetected[0])
            {
                TrainedFace = currentFrame.Copy(f.rect).Convert<Gray, byte>();
                break;
            }

            //Resize the detected face so it has the same size as the training images,
            //using cubic interpolation
            TrainedFace = TrainedFace.Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
            trainingImages.Add(TrainedFace);
            labels.Add(textBox1.Text);

            //Show face added in gray scale
            imageBox1.Image = TrainedFace;

            //Write the number of trained faces to a text file for later loading
            File.WriteAllText(Application.StartupPath + "/TrainedFaces/TrainedLabels.txt", trainingImages.ToArray().Length.ToString() + "%");

            //Save each trained face image and append its label to the text file
            for (int i = 1; i < trainingImages.ToArray().Length + 1; i++)
            {
                trainingImages.ToArray()[i - 1].Save(Application.StartupPath + "/TrainedFaces/face" + i + ".bmp");
                File.AppendAllText(Application.StartupPath + "/TrainedFaces/TrainedLabels.txt", labels.ToArray()[i - 1] + "%");
            }

            MessageBox.Show(textBox1.Text + "'s face detected and added", "Training OK", MessageBoxButtons.OK, MessageBoxIcon.Information);



            //Use a parameterized query rather than string concatenation to avoid SQL injection
            OleDbCommand cmd = new OleDbCommand();
            cmd.CommandType = CommandType.Text;
            cmd.CommandText = "insert into Login (username,[password]) values (?,?)";
            cmd.Parameters.AddWithValue("@username", textBox1.Text);
            cmd.Parameters.AddWithValue("@password", textBox4.Text);
            cmd.Connection = conn;
            conn.Open();
            cmd.ExecuteNonQuery();
            MessageBox.Show("User Account Successfully Created", "Caption", MessageBoxButtons.OKCancel, MessageBoxIcon.Information);
            conn.Close();
            textBox1.Clear();
            textBox4.Clear();

            button2.Enabled = false;
            button3.Enabled = true;


        }
        catch
        {
            MessageBox.Show("Enable the face detection first", "Training Fail", MessageBoxButtons.OK, MessageBoxIcon.Exclamation);
        }
    }


    void FrameGrabber(object sender, EventArgs e)
    {
        label3.Text = "0";
        //label4.Text = "";
        NamePersons.Add("");

        //Get the current frame from the capture device
        currentFrame = grabber.QueryFrame().Resize(320, 240, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);

        //Convert it to Grayscale
        gray = currentFrame.Convert<Gray, Byte>();

        //Face Detector
        MCvAvgComp[][] facesDetected = gray.DetectHaarCascade(
            face,
            1.2,
            10,
            Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
            new Size(20, 20));

        //Action for each face detected
        foreach (MCvAvgComp f in facesDetected[0])
        {
            t = t + 1;
            result = currentFrame.Copy(f.rect).Convert<Gray, byte>().Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
            //Draw a light green rectangle around the detected face
            currentFrame.Draw(f.rect, new Bgr(Color.LightGreen), 2);

            if (trainingImages.ToArray().Length != 0)
            {
                //TermCriteria for face recognition; the number of trained images is used as maxIteration
                MCvTermCriteria termCrit = new MCvTermCriteria(ContTrain, 0.001);

                //Eigen face recognizer
                EigenObjectRecognizer recognizer = new EigenObjectRecognizer(
                    trainingImages.ToArray(),
                    labels.ToArray(),
                    3000,
                    ref termCrit);

                name = recognizer.Recognize(result);

                //Draw the label for each face detected and recognized
                currentFrame.Draw(name, ref font, new Point(f.rect.X - 2, f.rect.Y - 2), new Bgr(Color.LightGreen));
            }

            NamePersons[t - 1] = name;
            NamePersons.Add("");

            //Set the number of faces detected on the scene
            label3.Text = facesDetected[0].Length.ToString();
        }

        t = 0;

        //Concatenate the names of the recognized persons
        for (int nnn = 0; nnn < facesDetected[0].Length; nnn++)
        {
            names = names + NamePersons[nnn] + ", ";
        }
        //Show the faces processed and recognized
        imageBoxFrameGrabber.Image = currentFrame;
        label4.Text = names;
        names = "";
        //Clear the list (vector) of names
        NamePersons.Clear();
    }

Solution

  • As I understand it: after enrollment, you want to confirm the user by their face using the eigenfaces method.

    The eigenfaces method can tell you when a face is not recognized. If you look at the Emgu CV reference for the Recognize method, you will see that it returns String.Empty when the face is not recognized (how readily unknown faces are rejected is controlled by the eigen distance threshold passed to the EigenObjectRecognizer constructor, 3000 in your code).

    So, for example, in this part of your code, check the result first:

          name = recognizer.Recognize(result);
          //CHECK IF name IS EMPTY, IF EMPTY STOP!

          //Draw the label for each face detected and recognized
          currentFrame.Draw(name, ref font, new Point(f.rect.X - 2, f.rect.Y - 2), new Bgr(Color.LightGreen));

    I am sorry if I misunderstood you. In future questions it would be great if you posted the corresponding parts of your code. Hope this helps.
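    If you also want to treat "recognized, but not the candidate who logged in" the same way (your Kelvin example), and to warn when no face is inside the image box at all, you can build on that same check. Below is a minimal sketch only: it assumes a hypothetical loggedInUsername field holding the username from your Login Form, and that FrameGrabber is attached to Application.Idle as in the standard Emgu CV samples (if you drive it with a Timer, stop and restart the Timer instead).

          //Sketch: inside the exam form's FrameGrabber, after DetectHaarCascade.
          //"loggedInUsername" is a hypothetical field holding the candidate who logged in (e.g. "Kelvin").

          if (facesDetected[0].Length == 0)
          {
              //No face at all inside the image box
              label4.Text = "No face detected";   //or pause and prompt here, the same way as below
          }

          foreach (MCvAvgComp f in facesDetected[0])
          {
              result = currentFrame.Copy(f.rect).Convert<Gray, byte>().Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
              currentFrame.Draw(f.rect, new Bgr(Color.LightGreen), 2);

              if (trainingImages.ToArray().Length != 0)
              {
                  MCvTermCriteria termCrit = new MCvTermCriteria(ContTrain, 0.001);
                  EigenObjectRecognizer recognizer = new EigenObjectRecognizer(
                      trainingImages.ToArray(), labels.ToArray(), 3000, ref termCrit);

                  name = recognizer.Recognize(result);

                  //Unknown face (String.Empty) or a face that is not the logged-in candidate
                  if (string.IsNullOrEmpty(name) || !name.Equals(loggedInUsername, StringComparison.OrdinalIgnoreCase))
                  {
                      Application.Idle -= FrameGrabber;   //pause grabbing while the dialog is open
                      MessageBox.Show("Wrong candidate detection, click OK to detect again.", "Exam paused", MessageBoxButtons.OK, MessageBoxIcon.Warning);
                      Application.Idle += FrameGrabber;   //resume grabbing after the user clicks OK
                      return;
                  }

                  //The recognized face matches the logged-in candidate: label it as before
                  currentFrame.Draw(name, ref font, new Point(f.rect.X - 2, f.rect.Y - 2), new Bgr(Color.LightGreen));
              }
          }

    Detaching FrameGrabber from Application.Idle before showing the MessageBox keeps the handler from firing again (and stacking dialogs) while the prompt is on screen.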