自定義相機分以下幾個步驟
1,判斷當前相機設備是否可用與是否授權
2,自定義相機的相關(guān)參數(shù)
3,相機切換與閃光燈
4,拍照處理
授權(quán)及設(shè)備判斷
1,攝像頭是否可用
/// Whether any camera capture source is available on this device.
func isCameraAvailable() -> Bool {
    return UIImagePickerController.isSourceTypeAvailable(.camera)
}
/// Whether the front-facing camera is available.
func isFrontCameraAvailable() -> Bool {
    return UIImagePickerController.isCameraDeviceAvailable(.front)
}
/// Whether the rear (back) camera is available.
func isBackCameraAvailable() -> Bool {
    return UIImagePickerController.isCameraDeviceAvailable(.rear)
}
2,用戶是否授權(quán)
/// Whether the user has granted camera (video-capture) permission.
///
/// - Returns: `true` only when the authorization status is `.authorized`;
///   `.notDetermined`, `.denied` and `.restricted` all yield `false`.
func isCanUseCamera() -> Bool {
    // Directly compare instead of the redundant `if … return true; return false`.
    return AVCaptureDevice.authorizationStatus(for: .video) == .authorized
}
相機參數(shù)配置
1,基礎(chǔ)配置
// MARK: - Basic capture-session setup

// Capture device: the default video camera.
device = AVCaptureDevice.default(for: .video)
// Input source wrapping the device.
input = try! AVCaptureDeviceInput(device: device)
// Still-image output.
output = AVCaptureStillImageOutput()
// Session tying input and output together.
session = AVCaptureSession()
if session.canAddInput(input) {
    session.addInput(input)
}
if session.canAddOutput(output) {
    session.addOutput(output)
}
// Preview layer that renders the live camera feed.
let layer = AVCaptureVideoPreviewLayer(session: session)
session.startRunning()
2,可選配置
// MARK: - Optional capture configuration

if session.canSetSessionPreset(.photo) {
    // Controls the quality/resolution of the captured image.
    session.sessionPreset = .photo
}
try! device.lockForConfiguration() // lock the device before changing settings
if device.isFlashModeSupported(.auto) {
    // Automatic flash.
    device.flashMode = .auto
}
if device.isWhiteBalanceModeSupported(.autoWhiteBalance) {
    // Automatic white balance.
    device.whiteBalanceMode = .autoWhiteBalance
}
// Release the configuration lock.
device.unlockForConfiguration()
拍攝
/// Captures a still image from the session's current video connection.
///
/// Prints "拍攝失敗" and returns when no video connection exists or the
/// asynchronous capture fails.
func takePhoto() {
    // guard-let instead of a nil check followed by a force unwrap.
    guard let connection = output.connection(with: .video) else {
        print("拍攝失敗")
        return
    }
    output.captureStillImageAsynchronously(from: connection) { (buffer, error) in
        // Check the error and the buffer before use — the original
        // force-unwrapped `buffer!` and could crash on a failed capture.
        guard error == nil, let buffer = buffer else {
            print("拍攝失敗")
            return
        }
        let data = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(buffer)
        // TODO: persist or display `data` (JPEG bytes) as needed.
        _ = data
    }
}
實時濾鏡相機
要實現(xiàn)實時濾鏡效果,則需要獲得相機捕獲的每一幀,并進行加濾鏡的操作
1,改變輸出源頭
// MARK: - Video-data output for per-frame (live-filter) processing

output = AVCaptureVideoDataOutput()
// Delegate plus the queue on which frame callbacks are delivered.
output.setSampleBufferDelegate(self, queue: queue)
// Request 32-bit BGRA pixel buffers (width/height etc. could also be set here).
output.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: NSNumber(value: kCVPixelFormatType_32BGRA)]
2,回調(diào)代理方法
/// AVCaptureVideoDataOutputSampleBufferDelegate callback: receives each camera
/// frame, runs it through a GPUImage2 brightness filter, and shows the result.
/// NOTE(review): building a new filter pipeline per frame is costly — consider
/// hoisting `brightnessAdjustment`/`pictureOutput` out of the callback.
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
// Convert the current frame to a UIImage so it can be filtered (GPUImage2).
let im = self.imageFromSampleBuffer(sampleBuffer: sampleBuffer)
// Create the filter input (the frame image).
let brightnessAdjustment = BrightnessAdjustment()
brightnessAdjustment.brightness = 0.2
let pictureInput = PictureInput(image: im)
// Create the filter output.
let pictureOutput = PictureOutput()
// Assign the completion closure.
pictureOutput.imageAvailableCallback = { image in
// `image` is the processed result, a UIImage.
OperationQueue.main.addOperation {
// Rotate 90° because the camera delivers frames in landscape orientation.
self.imv.image = image.imageRotatedByDegrees(degrees: 90, flip: false)
}
}
// Wire up the processing chain.
pictureInput --> brightnessAdjustment --> pictureOutput
// Start processing. synchronously: true = synchronous, false = asynchronous;
// when finished, imageAvailableCallback above is invoked.
pictureInput.processImage(synchronously: true)
}
補充buffer轉(zhuǎn)換為UIImage 和 UIImage進行旋轉(zhuǎn)(因為得到處理的圖片需要旋轉(zhuǎn)才正確)的方法 (代碼為Swift4.0版本)
extension UIImage {
    /// Returns a copy of the image rotated by `degrees`.
    ///
    /// - Parameters:
    ///   - degrees: Rotation angle in degrees (clockwise when facing the image
    ///     with `flip == false`; counter-clockwise when `flip == true`).
    ///   - flip: When `true`, also mirrors the image horizontally.
    /// - Returns: The rotated image, or `self` unchanged if a graphics
    ///   context could not be created.
    public func imageRotatedByDegrees(degrees: CGFloat, flip: Bool) -> UIImage {
        // (The original also defined an unused radians→degrees closure; removed.)
        let degreesToRadians: (CGFloat) -> CGFloat = { $0 / 180.0 * .pi }
        // Size of the bounding box that contains the rotated image.
        let rotatedViewBox = UIView(frame: CGRect(origin: .zero, size: size))
        rotatedViewBox.transform = CGAffineTransform(rotationAngle: degreesToRadians(degrees))
        let rotatedSize = rotatedViewBox.frame.size
        // Create the bitmap context; `defer` guarantees it is ended on every exit.
        UIGraphicsBeginImageContext(rotatedSize)
        defer { UIGraphicsEndImageContext() }
        guard let bitmap = UIGraphicsGetCurrentContext(), let cgImage = self.cgImage else {
            // No drawable context/CGImage — return the receiver instead of crashing.
            return self
        }
        // Move the origin to the centre so rotation happens around the middle.
        bitmap.translateBy(x: rotatedSize.width / 2.0, y: rotatedSize.height / 2.0)
        bitmap.rotate(by: degreesToRadians(degrees))
        // Core Graphics draws with a flipped y-axis, so always scale y by -1;
        // mirror horizontally as well when `flip` is requested.
        let xFlip: CGFloat = flip ? -1.0 : 1.0
        bitmap.scaleBy(x: xFlip, y: -1.0)
        bitmap.draw(cgImage, in: CGRect(x: -size.width / 2, y: -size.height / 2, width: size.width, height: size.height))
        return UIGraphicsGetImageFromCurrentImageContext() ?? self
    }
}
/// Converts a BGRA `CMSampleBuffer` coming from the camera into a `UIImage`.
///
/// - Parameter sampleBuffer: A video sample buffer whose pixels were requested
///   as `kCVPixelFormatType_32BGRA` (see the `videoSettings` configuration).
/// - Returns: A `UIImage` wrapping a snapshot of the frame's pixels.
func imageFromSampleBuffer(sampleBuffer: CMSampleBuffer) -> UIImage {
    // The media data lives in the buffer's Core Video image buffer.
    // (Traps when the buffer carries no image data, as the original did.)
    let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)!
    // Lock the pixel buffer while reading its base address.
    CVPixelBufferLockBaseAddress(pixelBuffer, .readOnly)
    let baseAddress = CVPixelBufferGetBaseAddress(pixelBuffer)
    let bytesPerRow = CVPixelBufferGetBytesPerRow(pixelBuffer)
    let width = CVPixelBufferGetWidth(pixelBuffer)
    let height = CVPixelBufferGetHeight(pixelBuffer)
    // BGRA layout: little-endian 32-bit with (premultiplied) alpha first.
    var bitmapInfo: UInt32 = CGBitmapInfo.byteOrder32Little.rawValue
    bitmapInfo |= CGImageAlphaInfo.premultipliedFirst.rawValue & CGBitmapInfo.alphaInfoMask.rawValue
    // Wrap the raw pixel data in a bitmap context.
    let context = CGContext(data: baseAddress,
                            width: width,
                            height: height,
                            bitsPerComponent: 8,
                            bytesPerRow: bytesPerRow,
                            space: CGColorSpaceCreateDeviceRGB(),
                            bitmapInfo: bitmapInfo)
    // Snapshot the pixels into a CGImage, then release the lock.
    let quartzImage = context?.makeImage()
    CVPixelBufferUnlockBaseAddress(pixelBuffer, .readOnly)
    return UIImage(cgImage: quartzImage!)
}