CIDetector.h in CoreImage ships with four built-in detection types:
/* Face detection */
CORE_IMAGE_EXPORT NSString* const CIDetectorTypeFace NS_AVAILABLE(10_7, 5_0);
/* Rectangle detection */
CORE_IMAGE_EXPORT NSString* const CIDetectorTypeRectangle NS_AVAILABLE(10_10, 8_0);
/* QR code detection */
CORE_IMAGE_EXPORT NSString* const CIDetectorTypeQRCode NS_AVAILABLE(10_10, 8_0);
/* Text detection */
#if __OBJC2__
CORE_IMAGE_EXPORT NSString* const CIDetectorTypeText NS_AVAILABLE(10_11, 9_0);
#endif
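All four types share the same usage pattern: create a CIDetector for the type you want, call featuresInImage:, and inspect the returned CIFeature subclasses. A minimal sketch using the QR code type (qrImage is a placeholder for whatever CIImage you already have):
// Sketch: the generic CIDetector pattern, shown here with the QR code type
CIDetector *qrDetector = [CIDetector detectorOfType:CIDetectorTypeQRCode
                                            context:nil
                                            options:@{CIDetectorAccuracy : CIDetectorAccuracyHigh}];
NSArray<CIFeature *> *features = [qrDetector featuresInImage:qrImage]; // qrImage: any CIImage
for (CIQRCodeFeature *feature in features) {
    NSLog(@"QR payload: %@", feature.messageString);
}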
接下來(lái)用CIDetectorTypeRectangle
對(duì)圖片的矩形狀邊緣進(jìn)行識(shí)別宙帝,效果圖如下 (Demo鏈接文章底部已給出)
識(shí)別邊緣
截取
Partial code:
- Initialize a high-accuracy rectangle detector
// High-accuracy rectangle edge detector
- (CIDetector *)highAccuracyRectangleDetector
{
static CIDetector *detector = nil;
static dispatch_once_t onceToken;
dispatch_once(&onceToken, ^
{
detector = [CIDetector detectorOfType:CIDetectorTypeRectangle context:nil options:@{CIDetectorAccuracy : CIDetectorAccuracyHigh}];
});
return detector;
}
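The dispatch_once here keeps a single shared detector instance, since creating one is relatively expensive. If needed, CIDetector.h offers further tuning keys for the rectangle detector; a hedged variant of the initializer above (the values are illustrative assumptions, not taken from the demo):
// Sketch: optional tuning keys for the rectangle detector (values are illustrative)
detector = [CIDetector detectorOfType:CIDetectorTypeRectangle
                              context:nil
                              options:@{CIDetectorAccuracy        : CIDetectorAccuracyHigh,
                                        CIDetectorAspectRatio     : @1.6,   // expected width/height ratio of the target
                                        CIDetectorMaxFeatureCount : @5}];   // cap the number of rectangles returned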
- 調(diào)用照相機(jī)捕獲攝像頭圖像
NSArray *possibleDevices = [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo];
AVCaptureDevice *device = [possibleDevices firstObject];
if (!device) return;
_imageDedectionConfidence = 0.0;
AVCaptureSession *session = [[AVCaptureSession alloc] init];
self.captureSession = session;
[session beginConfiguration];
self.captureDevice = device;
NSError *error = nil;
AVCaptureDeviceInput* input = [AVCaptureDeviceInput deviceInputWithDevice:device error:&error];
session.sessionPreset = AVCaptureSessionPresetPhoto;
[session addInput:input];
AVCaptureVideoDataOutput *dataOutput = [[AVCaptureVideoDataOutput alloc] init];
[dataOutput setAlwaysDiscardsLateVideoFrames:YES];
[dataOutput setVideoSettings:@{(id)kCVPixelBufferPixelFormatTypeKey:@(kCVPixelFormatType_32BGRA)}];
[dataOutput setSampleBufferDelegate:self queue:dispatch_get_main_queue()];
[session addOutput:dataOutput];
self.stillImageOutput = [[AVCaptureStillImageOutput alloc] init];
[session addOutput:self.stillImageOutput];
AVCaptureConnection *connection = [dataOutput.connections firstObject];
[connection setVideoOrientation:AVCaptureVideoOrientationPortrait];
// Commit the configuration and start the session so frames begin flowing
[session commitConfiguration];
[session startRunning];
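Note that the session only delivers frames once the user has granted camera access (and, from iOS 10 on, only if NSCameraUsageDescription is present in Info.plist). A hedged sketch of checking and requesting permission before running the setup above (startCameraSession is a hypothetical wrapper around that setup code):
// Sketch: request camera permission before configuring the session
AVAuthorizationStatus status = [AVCaptureDevice authorizationStatusForMediaType:AVMediaTypeVideo];
if (status == AVAuthorizationStatusNotDetermined) {
    [AVCaptureDevice requestAccessForMediaType:AVMediaTypeVideo completionHandler:^(BOOL granted) {
        if (granted) {
            dispatch_async(dispatch_get_main_queue(), ^{ [self startCameraSession]; });
        }
    }];
} else if (status == AVAuthorizationStatusAuthorized) {
    [self startCameraSession];
}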
- We also need a container that displays the captured frames
self.context = [[EAGLContext alloc] initWithAPI:kEAGLRenderingAPIOpenGLES2];
GLKView *view = [[GLKView alloc] initWithFrame:self.bounds];
view.autoresizingMask = UIViewAutoresizingFlexibleWidth | UIViewAutoresizingFlexibleHeight;
view.translatesAutoresizingMaskIntoConstraints = YES;
view.context = self.context;
view.contentScaleFactor = 1.0f;
view.drawableDepthFormat = GLKViewDrawableDepthFormat24;
[self insertSubview:view atIndex:0];
_glkView = view;
glGenRenderbuffers(1, &_renderBuffer);
glBindRenderbuffer(GL_RENDERBUFFER, _renderBuffer);
// Frames will be drawn into _coreImageContext
_coreImageContext = [CIContext contextWithEAGLContext:self.context];
[EAGLContext setCurrentContext:self.context];
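The frames themselves are drawn into this context from the sample-buffer callback described in the next step. A minimal sketch of that rendering (using self.bounds as the destination rect is a simplification; strictly it should be expressed in the drawable's pixel coordinates):
// Sketch: inside the sample-buffer callback, render the captured frame into the GLKView
CVPixelBufferRef pixelBuffer = (CVPixelBufferRef)CMSampleBufferGetImageBuffer(sampleBuffer);
CIImage *image = [CIImage imageWithCVPixelBuffer:pixelBuffer];
if (self.context && _coreImageContext) {
    [_coreImageContext drawImage:image inRect:self.bounds fromRect:image.extent];
    [self.context presentRenderbuffer:GL_RENDERBUFFER];
    [_glkView setNeedsDisplay];
}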
- Conform to the AVCaptureVideoDataOutputSampleBufferDelegate protocol. Once a frame is captured, the following delegate method is called:
-(void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
調(diào)用CIDetector
進(jìn)行識(shí)別,并且獲取最大不規(guī)則四邊形
// Get a CIImage from the sample buffer
CVPixelBufferRef pixelBuffer = (CVPixelBufferRef)CMSampleBufferGetImageBuffer(sampleBuffer);
CIImage *image = [CIImage imageWithCVPixelBuffer:pixelBuffer];
// Detect features with the high-accuracy rectangle detector
NSArray <CIFeature *>*features = [[self highAccuracyRectangleDetector] featuresInImage:image];
// Pick the largest quadrilateral from the feature list
_borderDetectLastRectangleFeature = [self biggestRectangleInRectangles:features];
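The biggestRectangleInRectangles: helper is not shown above; a minimal sketch of one possible implementation, which simply keeps the feature whose bounding box covers the largest area (the demo project may use a different heuristic):
// Sketch: pick the rectangle feature with the largest bounding-box area
- (CIRectangleFeature *)biggestRectangleInRectangles:(NSArray<CIFeature *> *)rectangles
{
    CIRectangleFeature *biggest = nil;
    CGFloat maxArea = 0.0;
    for (CIRectangleFeature *feature in rectangles) {
        CGFloat area = feature.bounds.size.width * feature.bounds.size.height;
        if (area > maxArea) {
            maxArea = area;
            biggest = feature;
        }
    }
    return biggest;
}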
- 識(shí)別到邊緣之后使用
CAShapeLayer
將邊緣繪制并顯示
// Draw the edge-detection overlay layer
- (void)drawBorderDetectRectWithImageRect:(CGRect)imageRect topLeft:(CGPoint)topLeft topRight:(CGPoint)topRight bottomLeft:(CGPoint)bottomLeft bottomRight:(CGPoint)bottomRight
{
if (!_rectOverlay) {
_rectOverlay = [CAShapeLayer layer];
_rectOverlay.fillRule = kCAFillRuleEvenOdd;
_rectOverlay.fillColor = [UIColor colorWithRed:73/255.0 green:130/255.0 blue:180/255.0 alpha:0.4].CGColor;
_rectOverlay.strokeColor = [UIColor whiteColor].CGColor;
_rectOverlay.lineWidth = 5.0f;
}
if (!_rectOverlay.superlayer) {
self.layer.masksToBounds = YES;
[self.layer addSublayer:_rectOverlay];
}
// Convert coordinates from image space to the UIKit coordinate system
TransformCIFeatureRect featureRect = [self transfromRealRectWithImageRect:imageRect topLeft:topLeft topRight:topRight bottomLeft:bottomLeft bottomRight:bottomRight];
// Path along the detected edges
UIBezierPath *path = [UIBezierPath new];
[path moveToPoint:featureRect.topLeft];
[path addLineToPoint:featureRect.topRight];
[path addLineToPoint:featureRect.bottomRight];
[path addLineToPoint:featureRect.bottomLeft];
[path closePath];
// Path for the background mask
UIBezierPath *rectPath = [UIBezierPath bezierPathWithRect:CGRectMake(-5,
-5,
self.frame.size.width + 10,
self.frame.size.height + 10)];
[rectPath setUsesEvenOddFillRule:YES];
[rectPath appendPath:path];
_rectOverlay.path = rectPath.CGPath;
}
With this in place, the real-time edge detection overlay is displayed.
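The transfromRealRectWithImageRect:... helper used above maps points from Core Image's coordinate space (origin at the bottom-left of the image) into UIKit view coordinates (origin at the top-left), scaling from the image size to the view size. A hedged sketch, assuming TransformCIFeatureRect is a simple struct of four CGPoints and ignoring aspect-fill cropping:
// Sketch: convert feature points from image space (bottom-left origin) to view space (top-left origin)
typedef struct {
    CGPoint topLeft, topRight, bottomLeft, bottomRight;
} TransformCIFeatureRect;

- (TransformCIFeatureRect)transfromRealRectWithImageRect:(CGRect)imageRect topLeft:(CGPoint)topLeft topRight:(CGPoint)topRight bottomLeft:(CGPoint)bottomLeft bottomRight:(CGPoint)bottomRight
{
    // Scale factors from the image size to the view size
    CGFloat scaleX = self.bounds.size.width  / imageRect.size.width;
    CGFloat scaleY = self.bounds.size.height / imageRect.size.height;
    // Flip the Y axis, then scale into the view's coordinate system
    CGPoint (^convert)(CGPoint) = ^CGPoint(CGPoint p) {
        return CGPointMake(p.x * scaleX, (imageRect.size.height - p.y) * scaleY);
    };
    TransformCIFeatureRect r;
    r.topLeft     = convert(topLeft);
    r.topRight    = convert(topRight);
    r.bottomLeft  = convert(bottomLeft);
    r.bottomRight = convert(bottomRight);
    return r;
}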
- Finally, for cropping after the photo is taken: apply the CIPerspectiveCorrection filter to map the detected quadrilateral onto a rectangle, which yields a properly squared-off image.
/// Convert an arbitrary quadrilateral into a rectangle
- (CIImage *)correctPerspectiveForImage:(CIImage *)image withFeatures:(CIRectangleFeature *)rectangleFeature
{
NSMutableDictionary *rectangleCoordinates = [NSMutableDictionary new];
rectangleCoordinates[@"inputTopLeft"] = [CIVector vectorWithCGPoint:rectangleFeature.topLeft];
rectangleCoordinates[@"inputTopRight"] = [CIVector vectorWithCGPoint:rectangleFeature.topRight];
rectangleCoordinates[@"inputBottomLeft"] = [CIVector vectorWithCGPoint:rectangleFeature.bottomLeft];
rectangleCoordinates[@"inputBottomRight"] = [CIVector vectorWithCGPoint:rectangleFeature.bottomRight];
return [image imageByApplyingFilter:@"CIPerspectiveCorrection" withInputParameters:rectangleCoordinates];
}
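Putting it together, a hedged sketch of how the still photo might be captured and then cropped with the filter above (the completion-handler plumbing and orientation handling are simplified assumptions, not the demo's exact code):
// Sketch: capture a still frame, then crop it using the last detected rectangle feature
AVCaptureConnection *connection = [self.stillImageOutput connectionWithMediaType:AVMediaTypeVideo];
[self.stillImageOutput captureStillImageAsynchronouslyFromConnection:connection
                                                    completionHandler:^(CMSampleBufferRef buffer, NSError *error) {
    if (error || !buffer) return;
    NSData *jpegData = [AVCaptureStillImageOutput jpegStillImageNSDataRepresentation:buffer];
    CIImage *enhanced = [CIImage imageWithData:jpegData];
    if (_borderDetectLastRectangleFeature) {
        enhanced = [self correctPerspectiveForImage:enhanced
                                       withFeatures:_borderDetectLastRectangleFeature];
    }
    // Render the corrected CIImage into a CGImage / UIImage for display or saving
    CIContext *renderContext = [CIContext contextWithOptions:nil];
    CGImageRef cgImage = [renderContext createCGImage:enhanced fromRect:enhanced.extent];
    UIImage *result = [UIImage imageWithCGImage:cgImage];
    CGImageRelease(cgImage);
    // ... use result ...
}];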
// TODO: after the edges are detected, allow the detected area to be adjusted manually
Manually adjusting the edge area
Demo address (GitHub): https://github.com/madaoCN/MADRectDetect (a Star would be much appreciated)