【问题标题】:Face Tracking in iPhone using OpenCV使用 OpenCV 在 iPhone 中进行人脸跟踪
【发布时间】:2011-09-07 20:29:36
【问题描述】:

我想在 iPhone 上实现与给定示例代码（原文此处为链接）相同的人脸跟踪。该示例是 Mac OS 上的代码，但我希望在 iPhone 上达到同样的效果。

关于如何在 iPhone 上实现人脸跟踪，有什么思路或建议吗？

【问题讨论】:

    标签: iphone face-detection


    【解决方案1】:

    您必须使用 OPENCV 检测人脸并将其导入您的代码。在这种方法中,我使用矩形/椭圆来表示检测到的人脸

    /// Detects faces in the given image with OpenCV's Haar cascade classifier,
    /// strokes a rectangle and an ellipse around each detected face on an
    /// offscreen CoreGraphics context, stores the last face rect on the app
    /// delegate (grabcropcoordrect), and returns the original image cropped to
    /// the last detected face.
    ///
    /// @param originalImage The source image to scan for faces.
    /// @return A crop of the last detected face, or nil if the cascade could
    ///         not be loaded or no face was found.
    - (UIImage *)opencvFaceDetect:(UIImage *)originalImage {
        // Report OpenCV errors to the parent handler instead of terminating.
        cvSetErrMode(CV_ErrModeParent);

        IplImage *image = [self CreateIplImageFromUIImage:originalImage];

        // Detect on a half-size image for speed; cvPyrDown performs the
        // Gaussian-smoothed downscale (CV_GAUSSIAN_5x5 is the only filter the
        // legacy API supports). Detected rects are mapped back with `scale`.
        IplImage *small_image = cvCreateImage(cvSize(image->width / 2, image->height / 2),
                                              IPL_DEPTH_8U, 3);
        cvPyrDown(image, small_image, CV_GAUSSIAN_5x5);
        int scale = 2;

        // Load the frontal-face Haar cascade bundled with the app.
        NSString *path = [[NSBundle mainBundle] pathForResource:@"haarcascade_frontalface_default"
                                                         ofType:@"xml"];
        CvHaarClassifierCascade *cascade =
            (CvHaarClassifierCascade *)cvLoad([path cStringUsingEncoding:NSASCIIStringEncoding],
                                              NULL, NULL, NULL);

        if (!cascade) {
            // BUG FIX: the original only logged here (its `return` was commented
            // out) and fell through to release uninitialized contextRef /
            // colorSpace later. Clean up the IplImages and bail out instead.
            NSLog(@"ERROR: Could not load classifier cascade\n");
            cvReleaseImage(&small_image);
            cvReleaseImage(&image);
            return nil;
        }

        // Scratch storage for the detector's result sequence.
        CvMemStorage *storage = cvCreateMemStorage(0);
        cvClearMemStorage(storage);

        CvSeq *faces = cvHaarDetectObjects(small_image, cascade, storage,
                                           1.1f, 3, 0, cvSize(20, 20));
        cvReleaseImage(&small_image);
        // BUG FIX: the full-size IplImage was never released in the original
        // and leaked on every call.
        cvReleaseImage(&image);

        // Offscreen canvas at the original resolution for drawing the results.
        CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
        CGContextRef contextRef =
            CGBitmapContextCreate(NULL, originalImage.size.width, originalImage.size.height,
                                  8, originalImage.size.width * 4, colorSpace,
                                  kCGImageAlphaPremultipliedLast | kCGBitmapByteOrderDefault);
        CGContextDrawImage(contextRef,
                           CGRectMake(0, 0, originalImage.size.width, originalImage.size.height),
                           originalImage.CGImage);
        CGContextSetLineWidth(contextRef, 4);
        CGContextSetRGBStrokeColor(contextRef, 1.0, 1.0, 1.0, 0.5);

        // BUG FIX: face_rect was read uninitialized when no face was detected.
        CGRect face_rect = CGRectZero;

        for (int i = 0; i < faces->total; i++) {
            // Drain per-iteration autoreleased temporaries.
            NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];

            CvRect cvrect = *(CvRect *)cvGetSeqElem(faces, i);

            // Map the detection from the half-size image back to full size.
            // BUG FIX: the original scaled x/width/height but left y unscaled
            // (and inflated height by 1.25), producing a vertically misplaced
            // rect; all four components must be multiplied by `scale`.
            face_rect = CGContextConvertRectToDeviceSpace(
                contextRef,
                CGRectMake(cvrect.x * scale, cvrect.y * scale,
                           cvrect.width * scale, cvrect.height * scale));

            // Publish the (last) detected rect on the app delegate so other
            // screens can crop against it.
            facedetectapp = (FaceDetectAppDelegate *)[[UIApplication sharedApplication] delegate];
            facedetectapp.grabcropcoordrect = face_rect;

            NSLog(@"  FACE off %f %f %f %f",
                  facedetectapp.grabcropcoordrect.origin.x,
                  facedetectapp.grabcropcoordrect.origin.y,
                  facedetectapp.grabcropcoordrect.size.width,
                  facedetectapp.grabcropcoordrect.size.height);

            CGContextStrokeRect(contextRef, face_rect);
            CGContextStrokeEllipseInRect(contextRef, face_rect);

            [pool release];
        }

        // Crop the original image to the last detected face. Guard against the
        // no-face case instead of passing a zero/garbage rect to CoreGraphics.
        UIImage *returnImage = nil;
        if (!CGRectIsEmpty(face_rect)) {
            CGImageRef croppedRef = CGImageCreateWithImageInRect(originalImage.CGImage, face_rect);
            returnImage = [UIImage imageWithCGImage:croppedRef];
            CGImageRelease(croppedRef);
        }

        CGContextRelease(contextRef);
        CGColorSpaceRelease(colorSpace);

        cvReleaseMemStorage(&storage);
        cvReleaseHaarClassifierCascade(&cascade);

        return returnImage;
    }
    

    【讨论】:

      【解决方案2】:

      看看这篇文章。它包括一个演示项目,并解释了如何在处理实时视频时获得最佳性能。

      Computer vision with iOS Part 2: Face tracking in live video

      【讨论】:

        猜你喜欢
        • 2011-09-15
        • 1970-01-01
        • 1970-01-01
        • 2017-05-31
        • 1970-01-01
        • 1970-01-01
        • 2016-07-20
        • 2013-07-17
        • 2011-06-22
        相关资源
        最近更新 更多