【问题标题】:LBPHFaceRecognizer 的 predict 函数总是返回 1（LBPHFaceRecognizer predict function always returns 1）
【发布时间】:2020-07-05 19:41:25
【问题描述】:

我正在开发一个面部识别系统,为此我选择了 LBPH 算法来完成这项任务。我收集了用户的样本图像并对其进行了训练。问题是在识别人脸时,LBPHRecognizer 的 predict() 总是为标签返回相同的值,但为置信度返回不同的值。即使面部未知,它也会返回 1。

我一直在使用的技术:Python 3.7.4、OpenCV 4.1.2

采集样本图片的代码

import cv2
import numpy as np
import os
import requests
import time
from PIL import Image

 class CollectFaceWebCam():
    def __init__(self, sid):
        self.studentId = sid

        #capture webcam 
        self.LiveWebCamera = cv2.VideoCapture(0)

        #pre-trained dataset (haar-cascade classifier)
        self.faceDataSet = cv2.CascadeClassifier('resources/haarcascade_frontalface_default.xml')

        #sample image capture counter
        self.imgCounter = 0

        self.directoryName = 'sampleImgFolder'

        #check path 
        if not os.path.exists(self.directoryName):
            os.makedirs(self.directoryName)

        if not os.path.exists(self.directoryName + '/' + self.studentId):
            os.makedirs(self.directoryName + '/' + self.studentId)


    def gen(self):
        while True:
            condition, frame = self.LiveWebCamera.read() #capture frame

            img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)#conversion to gray scale 

            #face detection 
            faces = self.faceDataSet.detectMultiScale(  # Detect face sizes
            img,
            scaleFactor=1.3,
            minNeighbors=5,
            minSize=(100, 100),
            flags=cv2.CASCADE_SCALE_IMAGE
            )

            for (x, y, w, h) in faces:
                end_crd_x = x + w  # face start coordinates
                end_crd_y = y + h  #face end coordinate

                #draw rectangle 
                #@parms image, start plot, end plot, thickness, color
                cv2.rectangle(frame, (x, y), (end_crd_x, end_crd_y), (0, 255, 33), 1)


                #accepts multiple face        
                if len(faces) >= 0:

                    #face must be single in frame
                    if len(faces) == 1:
                        detectedImg = img[y:y + h, x:x + w]

                        #checking blurness of image 
                        blurValue = cv2.Laplacian(detectedImg, cv2.CV_64F).var()

                        #ignoring the blury images
                        if not blurValue <= 60:

                            newImg = img[y:y + h, x:x + w] #new img 
                            #saving the detected faces
                            filename = '{}\{}\{}\{}_{}'.format(os.getcwd(), self.directoryName, self.studentId, self.studentId, self.imgCounter) + '.jpg'
                            cv2.imwrite(filename, newImg)
                            self.imgCounter += 1

                    else:
                        cv2.putText(frame,"Multiple Face not allowed", (50,150), cv2.FONT_HERSHEY_SIMPLEX, 1, (237, 20, 5), thickness=2)


            cv2.putText(frame,"Collecting Sample", (50,100), cv2.FONT_HERSHEY_SIMPLEX, 1, (250, 250, 250), thickness=3)
            cv2.putText(frame,"Image Count " + str(self.imgCounter), (50,200), cv2.FONT_HERSHEY_SIMPLEX, 2, (237, 20, 5), thickness=2)

            cv2.imshow('Collecting Sample', frame) # display frames

            k = cv2.waitKey(100) & 0xff # capture when user press 'esc'
            if k == 27:
                break
            elif self.imgCounter == 110:
                break

        self.LiveWebCamera.release() #stop video capture
        cv2.destroyAllWindows() #close all windows


class CleanSampleImages():
    """Validate collected sample images, deleting any file that does not
    contain exactly one face with one or two detectable eyes."""

    def __init__(self):
        self.faceDataset = cv2.CascadeClassifier('resources/haarcascade_frontalface_default.xml')
        self.eyeDataset = cv2.CascadeClassifier('resources/haarcascade_eye.xml')
        self.targetFolder = 'sampleImgFolder'

    def checkFace(self):
        """Walk sampleImgFolder/<student>/ and remove invalid images.

        BUG FIXES vs. the original post:
        * `break` after deleting a no-face image aborted the scan of the
          whole directory; `continue` keeps checking the remaining files.
        * `not len(eyes) > 0 and len(eyes) <= 2` parsed as
          `(len(eyes) == 0) and ...` due to precedence; the intended test
          is "delete unless 1 or 2 eyes were found".
        * the PIL image is now closed (context manager) before os.remove,
          which otherwise fails on Windows with the file still open.
        """
        os.chdir(self.targetFolder)
        for directory in os.listdir():
            os.chdir(directory)
            for files in os.listdir():
                imagePath = '{}/{}'.format(os.getcwd(), files)

                # load as grayscale numpy array, releasing the file handle
                with Image.open(imagePath) as imagePil:
                    imageNumpy = np.array(imagePil.convert('L'))

                # delete the file unless exactly one face is found
                faces = self.faceDataset.detectMultiScale(imageNumpy)
                if len(faces) != 1:
                    os.remove(files)
                    continue  # keep scanning the remaining files

                # require one or two detectable eyes, otherwise delete
                eyes = self.eyeDataset.detectMultiScale(imageNumpy)
                if not (0 < len(eyes) <= 2):
                    os.remove(files)
            os.chdir('../')
        os.chdir('../')





# id must be in X-X-ID format, e.g. a-b-342
t = CollectFaceWebCam('sa-t-1')
t.gen()

# BUG FIX: the original did `clean = CleanSampleImages` (missing call
# parentheses, so `clean` was the class itself) and then invoked
# `c.checkFace()` on the undefined name `c`.
clean = CleanSampleImages()
clean.checkFace()

以上代码由两个类 CollectFaceWebCam 和 CleanSampleImages 组成。 CollectFaceWebCam 用于收集样本图像。 CleanSampleImages 用于清理收集的数据。如果图像不包含人脸,则删除该文件。

训练图像的代码

import os
import cv2
import numpy as np
from PIL import Image

class Trainer():
    """Train an LBPH face recognizer from the collected sample images and
    persist the model to date.yml.

    BUG FIXES vs. the original post:
    * `def train` had lost its indentation and sat at module level, so
      `tr.train(...)` raised AttributeError; it is a method again here.
    * `getImgwithId` appended the FULL image for every detected face
      instead of the cropped face region, so the recognizer was trained
      on whole frames — a likely cause of predict() always returning the
      same label for every face.
    """

    def __init__(self):
        self.recognizer = cv2.face.LBPHFaceRecognizer_create()
        self.targetImagesDirectory = "sampleImgFolder"
        self.dataset = cv2.CascadeClassifier('resources/haarcascade_frontalface_default.xml')

    def getImgwithId(self):
        """Return (face_crops, labels) read from sampleImgFolder.

        Labels are parsed from the filename, expected format
        ta-s-ID_Counter.jpg (the numeric ID is the third '-' field).
        """
        sampleImage, sampleImageId = [], []
        rootPath = os.path.join(os.getcwd(), self.targetImagesDirectory)

        if os.path.exists(rootPath):
            print('current path is ' + rootPath)
            for studentDir in os.listdir(rootPath):
                studentPath = os.path.join(rootPath, studentDir)
                for file in os.listdir(studentPath):
                    # retrieve the id from the filename
                    # (filename format: ta-s-ID_Filename.jpg)
                    label = file.split('_')[0].split('-')[2]

                    imageFilePath = os.path.join(studentPath, file)
                    imagePil = Image.open(imageFilePath).convert('L')

                    # conversion to numpy array
                    imageNp = np.array(imagePil, 'uint8')

                    faces = self.dataset.detectMultiScale(imageNp)
                    for (x, y, w, h) in faces:
                        # append the cropped face region, not the full image
                        sampleImage.append(imageNp[y:y + h, x:x + w])
                        sampleImageId.append(label)
        return sampleImage, np.array(sampleImageId, dtype=int)

    def train(self, data, label):
        """Fit the LBPH model on (data, label) and write it to date.yml."""
        try:
            self.recognizer.train(data, label)
            self.msg = 'Training Successful'
            print('writting')
            self.recognizer.write('date.yml')
            print('writing finished')
        except Exception as e:
            # the original bare `except:` swallowed everything (including
            # KeyboardInterrupt) and hid the actual error
            self.msg = 'Core: Training Error'
            print('except', e)

# Build the training set, then fit and persist the LBPH model.
tr = Trainer()
sampleFaces, sampleFaceId = tr.getImgwithId()
tr.train(sampleFaces, sampleFaceId)

人脸识别代码

import os
import cv2
import numpy as np
from PIL import Image

class Recognizer():
    """Recognize faces from the webcam using the trained LBPH model.

    BUG FIXES vs. the original post:
    * both method bodies were dedented to module level, leaving the class
      body empty (a syntax error); they are indented as methods here.
    * `self.font = cv2.FONT_HERSHEY_SIMPLEX = 2` was a chained assignment
      that overwrote the cv2 module attribute with 2; the constant is now
      only read.
    * LBPH confidence is a distance (lower = better match); predictions
      are now thresholded so unknown faces are not blindly accepted —
      this is the symptom the question reports.
    """

    def __init__(self):
        self.recognizer = cv2.face.LBPHFaceRecognizer_create()
        self.recognizer.read('date.yml')
        self.targetImagesDirectory = "sampleImgFolder"
        self.dataset = cv2.CascadeClassifier('resources/haarcascade_frontalface_default.xml')

        self.captureVideo = cv2.VideoCapture(0)
        self.font = cv2.FONT_HERSHEY_SIMPLEX  # font for on-frame text
        self.predictedUser = []  # labels accepted during gen()

    def gen(self):
        """Stream webcam frames, predict each detected face, collect labels."""
        while True:
            condition, frame = self.captureVideo.read()  # capture a frame

            img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # convert to grayscale

            # face detection
            faces = self.dataset.detectMultiScale(
                img,
                scaleFactor=1.3,
                minNeighbors=5,
                minSize=(100, 100),
                flags=cv2.CASCADE_SCALE_IMAGE,
            )

            for (x, y, w, h) in faces:
                end_crd_x = x + w  # face end x coordinate
                end_crd_y = y + h  # face end y coordinate

                # draw rectangle: image, start point, end point, color, thickness
                cv2.rectangle(frame, (x, y), (end_crd_x, end_crd_y), (0, 255, 33), 1)

                predictUser, confidence = self.recognizer.predict(img[y:y + h, x:x + w])
                # lower distance = better match; reject weak matches as
                # "unknown" (threshold 70 — TODO tune against your data)
                if confidence < 70:
                    self.predictedUser.append(predictUser)

            cv2.imshow('test', frame)

            k = cv2.waitKey(100) & 0xff  # ESC to stop
            if k == 27:
                break

        self.captureVideo.release()
        cv2.destroyAllWindows()

# Run live recognition until ESC is pressed, then dump the labels seen.
recognizer = Recognizer()
recognizer.gen()
print(recognizer.predictedUser)

"predictUser, confidence = self.recognizer.predict(img[y:y+h,x:x+w])" 识别器类中的代码行总是返回相同的标签值。人脸识别代码输出如下:

我很想知道问题的原因和位置,因为我的技能和研究无法引导我识别问题。

【问题讨论】:

    标签: python opencv face-recognition opencv-python


    【解决方案1】:

    这可能是因为数据收集过程。我看到你多次使用级联分类器,你可以限制它。在网络摄像头上检查人脸时,您可以当时使用分类器并仅存储提取/裁剪的人脸。同样在预测期间,使用置信度作为阈值来限制错误预测。

    【讨论】:

      猜你喜欢
      • 1970-01-01
      • 1970-01-01
      • 1970-01-01
      • 2016-03-22
      • 1970-01-01
      • 1970-01-01
      • 1970-01-01
      • 1970-01-01
      • 2019-09-07
      相关资源
      最近更新 更多