兩個注意點
在初始化AVCaptureSession時設(shè)置輸出圖像尺寸
// Create the capture session and pin the output frame size to 640x480
// so downstream buffers have a known, fixed resolution.
AVCaptureSession *session = [[AVCaptureSession alloc] init];
[session setSessionPreset:AVCaptureSessionPreset640x480];
在設(shè)置AVCaptureVideoDataOutput時要注意設(shè)置YUV輸出而不是RGB
// Configure the video data output to deliver bi-planar full-range YUV (NV12)
// frames rather than RGB, so the Y plane can be used directly as grayscale.
AVCaptureVideoDataOutput *dataOutput = [[AVCaptureVideoDataOutput alloc] init];
NSDictionary *outputSettings =
    @{(id)kCVPixelBufferPixelFormatTypeKey : @(kCVPixelFormatType_420YpCbCr8BiPlanarFullRange)};
dataOutput.videoSettings = outputSettings;
在Camera的代理方法中可以拿到CMSampleBufferRef
/// AVCaptureVideoDataOutputSampleBufferDelegate callback, invoked for every
/// captured frame. `sampleBuffer` holds the live pixel data from the camera.
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
{
    // sampleBuffer is the real-time frame data delivered by the camera.
    // (The original snippet used an HTML-style <!-- --> comment here, which
    // is not valid Objective-C and would not compile.)
}
// Rotate an 8-bit single-channel image 90 degrees clockwise.
// src is M pixels wide and N pixels tall; dst must hold M*N bytes and,
// after the call, is N pixels wide and M pixels tall.
void Matrix_Rotate_90(uint8_t *src,uint8_t *dst,int M ,int N)
{
    for (int row = 0; row < N; row++) {
        const uint8_t *srcRow = src + row * M;
        for (int col = 0; col < M; col++) {
            // Source pixel (row, col) lands at destination (col, N-1-row).
            dst[col * N + (N - 1 - row)] = srcRow[col];
        }
    }
}
轉(zhuǎn)灰度圖像數(shù)據(jù)
// Extract the luma (Y) plane of the captured frame — for bi-planar YUV this
// plane IS the grayscale image — rotate it upright, and hand it on.
CVImageBufferRef imageBufferRef = CMSampleBufferGetImageBuffer(sampleBuffer);
// NSLog(@"查看寬%zu * 高%zu", CVPixelBufferGetWidth(imageBufferRef), CVPixelBufferGetHeight(imageBufferRef));
CVPixelBufferLockBaseAddress(imageBufferRef, 0);
// Plane 0 of kCVPixelFormatType_420YpCbCr8BiPlanar* is the Y (luminance) plane.
unsigned char *ptr_image = (unsigned char *)CVPixelBufferGetBaseAddressOfPlane(imageBufferRef, 0);
// Read the real plane dimensions (640 x 480 given AVCaptureSessionPreset640x480)
// instead of hard-coding them. One byte per pixel for a grayscale plane — the
// original malloc'ed width * height * 4, four times more than needed.
// NOTE(review): this assumes bytesPerRow == width for plane 0; if
// CVPixelBufferGetBytesPerRowOfPlane reports padding, copy row by row instead.
size_t width = CVPixelBufferGetWidthOfPlane(imageBufferRef, 0);
size_t height = CVPixelBufferGetHeightOfPlane(imageBufferRef, 0);
unsigned char *grayImg = (unsigned char *)malloc(width * height);
// Camera frames arrive in landscape; rotate 90° clockwise before passing the
// grayscale image to the face-recognition algorithm. grayImg is owned by the
// consumer, which must free() it when done.
Matrix_Rotate_90(ptr_image, grayImg, (int)width, (int)height);
// Balance the lock taken above — the original snippet leaked it.
CVPixelBufferUnlockBaseAddress(imageBufferRef, 0);