The Core Image framework defines CIDetector and CIFaceFeature, which together provide simple but powerful face detection.
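For reference, a minimal Core Image sketch (the helper function and its input image are illustrative, not from the original):

```objc
#import <CoreImage/CoreImage.h>
#import <UIKit/UIKit.h>

// Sketch: detect faces in a still CIImage with CIDetector/CIFaceFeature.
// THDetectFacesInImage is a hypothetical helper for illustration.
static void THDetectFacesInImage(CIImage *image) {
    NSDictionary *options = @{CIDetectorAccuracy: CIDetectorAccuracyHigh};
    CIDetector *detector =
        [CIDetector detectorOfType:CIDetectorTypeFace
                           context:[CIContext contextWithOptions:nil]
                           options:options];
    for (CIFaceFeature *feature in [detector featuresInImage:image]) {
        NSLog(@"Face bounds: %@", NSStringFromCGRect(feature.bounds));
    }
}
```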
However, these Core Image classes are not optimized for real-time use. In AVFoundation, face detection is also available through a dedicated AVCaptureOutput subclass, AVCaptureMetadataOutput.
When face detection is in use, the output emits objects of the concrete subclass AVMetadataFaceObject. AVMetadataFaceObject defines several properties that describe the detected face. The most important is the face's bounds; it also provides the roll angle, which indicates how far the head tilts toward a shoulder (rotation about the z-axis), and the yaw angle, which is the face's rotation about the y-axis.
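As a quick illustration of those properties (THLogFace is a hypothetical helper, not part of the original code):

```objc
// Hypothetical helper: log the face properties described above. The angle
// properties are only meaningful when the corresponding has* flag is YES.
static void THLogFace(AVMetadataFaceObject *face) {
    NSLog(@"faceID: %li bounds: %@",
          (long)face.faceID, NSStringFromCGRect(face.bounds));
    if (face.hasRollAngle) {
        NSLog(@"roll: %f degrees", face.rollAngle); // tilt toward a shoulder (z-axis)
    }
    if (face.hasYawAngle) {
        NSLog(@"yaw: %f degrees", face.yawAngle);   // rotation about the y-axis
    }
}
```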
Configuring the session is much like the earlier still-photo setup; the only difference is swapping in a different output:
```objc
- (BOOL)setupSessionOutputs:(NSError **)error {
    self.metadataOutput = [[AVCaptureMetadataOutput alloc] init];
    if ([self.captureSession canAddOutput:self.metadataOutput]) {
        [self.captureSession addOutput:self.metadataOutput];

        // Only request face metadata.
        NSArray *metadataObjectTypes = @[AVMetadataObjectTypeFace];
        self.metadataOutput.metadataObjectTypes = metadataObjectTypes;

        // Face callbacks drive UI updates, so deliver them on the main queue.
        dispatch_queue_t mainQueue = dispatch_get_main_queue();
        [self.metadataOutput setMetadataObjectsDelegate:self
                                                  queue:mainQueue];
        return YES;
    } else {
        if (error) {
            NSDictionary *userInfo = @{NSLocalizedDescriptionKey:
                                           @"Failed to add metadata output."};
            *error = [NSError errorWithDomain:THCameraErrorDomain
                                         code:THCameraErrorFailedToAddOutput
                                     userInfo:userInfo];
        }
        return NO;
    }
}
```
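One defensive addition worth considering (not in the original): assigning a type the output does not support raises an exception, so the request can be guarded with availableMetadataObjectTypes, which is populated once the output has been added to the session:

```objc
// Guard sketch: only request face metadata if the output supports it.
if ([self.metadataOutput.availableMetadataObjectTypes
        containsObject:AVMetadataObjectTypeFace]) {
    self.metadataOutput.metadataObjectTypes = @[AVMetadataObjectTypeFace];
}
```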
The AVCaptureMetadataOutputObjectsDelegate method that needs to be implemented:
```objc
- (void)captureOutput:(AVCaptureOutput *)captureOutput
didOutputMetadataObjects:(NSArray *)metadataObjects
       fromConnection:(AVCaptureConnection *)connection {
    for (AVMetadataFaceObject *face in metadataObjects) {
        NSLog(@"Face detected with ID: %li", (long)face.faceID);
        NSLog(@"Face bounds: %@", NSStringFromCGRect(face.bounds));
    }
    [self.faceDetectionDelegate didDetectFaces:metadataObjects];
}
```
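Note that the bounds of the metadata objects delivered here are normalized device coordinates, not view coordinates; that is why the visualization code below first runs each face through the preview layer's transformedMetadataObjectForMetadataObject: method.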
The THFaceDetectionDelegate protocol looks like this:
```objc
@protocol THFaceDetectionDelegate <NSObject>
- (void)didDetectFaces:(NSArray *)faces;
@end
```
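The delegate here would typically be a preview view that owns the layers used below. A plausible interface, with assumed property names matching the code that follows:

```objc
// Hypothetical preview view conforming to THFaceDetectionDelegate; the
// property names mirror those referenced in didDetectFaces: below.
@interface THPreviewView : UIView <THFaceDetectionDelegate>
@property (nonatomic, strong) AVCaptureVideoPreviewLayer *previewLayer; // session preview
@property (nonatomic, strong) CALayer *overlayLayer;           // parent of all face layers
@property (nonatomic, strong) NSMutableDictionary *faceLayers; // faceID -> CALayer
@end
```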
Once AVMetadataFaceObject instances are delivered, the face data can be visualized:
```objc
- (void)didDetectFaces:(NSArray *)faces {
    // Convert metadata coordinates into the preview layer's coordinate space.
    NSArray *transformedFaces = [self transformedFacesFromFaces:faces];

    // Start with all currently tracked face IDs; any ID not seen in this
    // callback belongs to a face that has left the frame.
    NSMutableArray *lostFaces = [self.faceLayers.allKeys mutableCopy];

    for (AVMetadataFaceObject *face in transformedFaces) {
        NSNumber *faceID = @(face.faceID);
        [lostFaces removeObject:faceID];

        // Reuse the layer for this faceID, or create one on first sighting.
        CALayer *layer = [self.faceLayers objectForKey:faceID];
        if (!layer) {
            layer = [self makeFaceLayer];
            [self.overlayLayer addSublayer:layer];
            self.faceLayers[faceID] = layer;
        }

        layer.transform = CATransform3DIdentity;
        layer.frame = face.bounds;

        if (face.hasRollAngle) {
            CATransform3D t = [self transformForRollAngle:face.rollAngle];
            layer.transform = CATransform3DConcat(layer.transform, t);
        }
        if (face.hasYawAngle) {
            CATransform3D t = [self transformForYawAngle:face.yawAngle];
            layer.transform = CATransform3DConcat(layer.transform, t);
        }
    }

    // Remove layers for faces that are no longer detected.
    for (NSNumber *faceID in lostFaces) {
        CALayer *layer = [self.faceLayers objectForKey:faceID];
        [layer removeFromSuperlayer];
        [self.faceLayers removeObjectForKey:faceID];
    }
}

- (NSArray *)transformedFacesFromFaces:(NSArray *)faces {
    NSMutableArray *transformedFaces = [NSMutableArray array];
    for (AVMetadataObject *face in faces) {
        AVMetadataObject *transformedFace =
            [self.previewLayer transformedMetadataObjectForMetadataObject:face];
        [transformedFaces addObject:transformedFace];
    }
    return transformedFaces;
}

- (CALayer *)makeFaceLayer {
    CALayer *layer = [CALayer layer];
    layer.borderWidth = 5.0f;
    layer.borderColor =
        [UIColor colorWithRed:0.188 green:0.517 blue:0.877 alpha:1.000].CGColor;
    return layer;
}

// Roll is a rotation about the z-axis (head tilting toward a shoulder).
- (CATransform3D)transformForRollAngle:(CGFloat)rollAngleInDegrees {
    CGFloat rollAngleInRadians = THDegreesToRadians(rollAngleInDegrees);
    return CATransform3DMakeRotation(rollAngleInRadians, 0.0f, 0.0f, 1.0f);
}

// Yaw is a rotation about the y-axis (face turning left or right).
- (CATransform3D)transformForYawAngle:(CGFloat)yawAngleInDegrees {
    CGFloat yawAngleInRadians = THDegreesToRadians(yawAngleInDegrees);
    CATransform3D yawTransform =
        CATransform3DMakeRotation(yawAngleInRadians, 0.0f, -1.0f, 0.0f);
    return CATransform3DConcat(yawTransform, [self orientationTransform]);
}
```
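The code above references two helpers that this section does not define. A sketch of what they might look like (the orientationTransform body is an assumption: it compensates for device rotation with a z-axis rotation):

```objc
// Degrees-to-radians conversion used by the roll/yaw transforms.
static CGFloat THDegreesToRadians(CGFloat degrees) {
    return degrees * M_PI / 180.0;
}

// Assumed implementation: rotate about the z-axis so the yaw transform stays
// correct as the device orientation changes.
- (CATransform3D)orientationTransform {
    CGFloat angle = 0.0;
    switch ([UIDevice currentDevice].orientation) {
        case UIDeviceOrientationPortraitUpsideDown:
            angle = M_PI;
            break;
        case UIDeviceOrientationLandscapeRight:
            angle = -M_PI_2;
            break;
        case UIDeviceOrientationLandscapeLeft:
            angle = M_PI_2;
            break;
        default: // portrait
            angle = 0.0;
            break;
    }
    return CATransform3DMakeRotation(angle, 0.0f, 0.0f, 1.0f);
}
```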