CoreImage一個好玩的地方就是它可以檢測出一張人物圖片上左眼、右眼以及嘴的詳細位置。請注意這裏使用的是“檢測”而不是“識別”。CoreImage目前還沒有辦法識別出人臉是誰,它僅僅能檢測出人臉上眼睛和嘴的位置。
它的使用也是比較方便快捷的。核心代碼如下:
// Load the source image and wrap it in a CIImage for Core Image processing.
UIImage *image = [UIImage imageNamed:@"baby.jpg"];
CIImage *beginImage = [[CIImage alloc] initWithImage:image];
// Create the face detector. The options dictionary selects detection accuracy;
// CIDetectorAccuracyLow trades precision for speed.
CIDetector *detector = [CIDetector detectorOfType:CIDetectorTypeFace
                                          context:nil
                                          options:@{CIDetectorAccuracy : CIDetectorAccuracyLow}];
// Each element of the returned array describes one detected face
// (overall bounds plus eye and mouth positions).
NSArray *faceFeatures = [detector featuresInImage:beginImage];
for (CIFaceFeature *faceFeature in faceFeatures) {
    // Face width, used to size the overlay markers proportionally to the face.
    CGFloat faceWidth = faceFeature.bounds.size.width;
    // Left eye: overlay a translucent red square centered on the eye position.
    if (faceFeature.hasLeftEyePosition) {
        UIView *leftEyeView = [[UIView alloc] initWithFrame:CGRectMake(faceFeature.leftEyePosition.x - faceWidth * 0.15,
                                                                       faceFeature.leftEyePosition.y - faceWidth * 0.15,
                                                                       faceWidth * 0.3,
                                                                       faceWidth * 0.3)];
        leftEyeView.backgroundColor = [[UIColor redColor] colorWithAlphaComponent:0.3];
        [self.view addSubview:leftEyeView];
    }
    // Right eye: same treatment as the left eye.
    if (faceFeature.hasRightEyePosition) {
        UIView *rightEyeView = [[UIView alloc] initWithFrame:CGRectMake(faceFeature.rightEyePosition.x - faceWidth * 0.15,
                                                                        faceFeature.rightEyePosition.y - faceWidth * 0.15,
                                                                        faceWidth * 0.3,
                                                                        faceWidth * 0.3)];
        rightEyeView.backgroundColor = [[UIColor redColor] colorWithAlphaComponent:0.3];
        [self.view addSubview:rightEyeView];
    }
    // Mouth: translucent green square, slightly larger than the eye markers.
    if (faceFeature.hasMouthPosition) {
        UIView *mouthView = [[UIView alloc] initWithFrame:CGRectMake(faceFeature.mouthPosition.x - faceWidth * 0.2,
                                                                     faceFeature.mouthPosition.y - faceWidth * 0.2,
                                                                     faceWidth * 0.4,
                                                                     faceWidth * 0.4)];
        mouthView.backgroundColor = [[UIColor greenColor] colorWithAlphaComponent:0.3];
        [self.view addSubview:mouthView];
    }
}
self.imageView.image = image;
[self.imageView sizeToFit];
// Vertical flip (note: a scale of (1, -1), not a rotation): Core Image places
// the origin at the lower-left while UIKit places it at the top-left, so flip
// both the image view and the container so the feature overlays, whose frames
// use Core Image coordinates, line up with the displayed image.
self.imageView.transform = CGAffineTransformMakeScale(1, -1);
self.view.transform = CGAffineTransformMakeScale(1, -1);
需要注意的是:CIImage的座標系和UIKit的座標系是不同的。CIImage的座標系原點在圖片的左下角,y軸向上;而UIKit的座標系原點在視圖的左上角,y軸向下。因此把檢測結果繪製到UIKit視圖上時,座標需要經過翻轉變換(上面代碼中用 CGAffineTransformMakeScale(1, -1) 實現)。