macOS and iOS support hardware encoding and decoding of H.264 and H.265 video via VideoToolbox. This post walks through two pipelines: demuxing with FFmpeg and decoding with VideoToolbox, and encoding plus muxing Mac-captured frames with FFmpeg.
FFmpeg Demuxing + VideoToolbox Decoding
An FFmpeg AVPacket maps to a CMBlockBufferRef on the Apple side: the packet's compressed data can back a block buffer directly, which is then wrapped in the CMSampleBufferRef that VideoToolbox consumes.
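The demuxing setup is elided in the -main method below; it is the standard FFmpeg open/probe sequence. A minimal sketch (input_path is a placeholder; format_ctx, codec_ctx and video_index are the same variables the snippets below use):

// Standard FFmpeg demuxing setup (sketch); this is where codec_ctx->extradata comes from
AVFormatContext * format_ctx = NULL;
if (avformat_open_input(&format_ctx, input_path, NULL, NULL) < 0) {
    NSLog(@"Open input failed");
}
avformat_find_stream_info(format_ctx, NULL);
int video_index = av_find_best_stream(format_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
const AVCodec * codec = avcodec_find_decoder(format_ctx->streams[video_index]->codecpar->codec_id);
AVCodecContext * codec_ctx = avcodec_alloc_context3(codec);
avcodec_parameters_to_context(codec_ctx, format_ctx->streams[video_index]->codecpar);
// For H.264 in MP4/MOV, codec_ctx->extradata now holds the avcC box with the SPS and PPS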
// Parse the SPS and PPS out of AVCodecContext->extradata
// (assumes avcC format with exactly one SPS and one PPS)
static void parseH264SequenceHeader(uint8_t * extra_data, uint8_t ** sps, size_t * sps_size, uint8_t ** pps, size_t * pps_size) {
    // avcC layout: 5 header bytes, 1 byte SPS count, 2 bytes SPS length, the SPS NALU,
    // then 1 byte PPS count, 2 bytes PPS length, the PPS NALU
    int spsSize = (extra_data[6] << 8) + extra_data[7];
    *sps_size = spsSize;
    *sps = &extra_data[8];
    int ppsSize = (extra_data[8 + spsSize + 1] << 8) + extra_data[8 + spsSize + 2];
    *pps = &extra_data[8 + spsSize + 3];
    *pps_size = ppsSize;
}
- (void)main {
    ...
    // Initialize the VideoToolbox decoder from the SPS/PPS found in extradata
    parseH264SequenceHeader(codec_ctx->extradata, &sps, &sps_size, &pps, &pps_size);
    uint8_t * parameterSetPointers[2] = {sps, pps};
    size_t parameterSetSizes[2] = {sps_size, pps_size};
    OSStatus status = CMVideoFormatDescriptionCreateFromH264ParameterSets(kCFAllocatorDefault, 2, (const uint8_t * const *)parameterSetPointers, parameterSetSizes, 4, &formatDescRef);
    if (status != noErr) {
        NSLog(@"Create video description failed...");
    }
    VTDecompressionOutputCallbackRecord callback;
    callback.decompressionOutputCallback = didDecompress;
    callback.decompressionOutputRefCon = (__bridge void *)self;
    // Request planar I420 output so the planes can be written straight to a YUV file
    NSDictionary * destinationImageBufferAttributes = @{(id)kCVPixelBufferPixelFormatTypeKey: @(kCVPixelFormatType_420YpCbCr8Planar)};
    status = VTDecompressionSessionCreate(kCFAllocatorDefault, formatDescRef, NULL, (__bridge CFDictionaryRef)destinationImageBufferAttributes, &callback, &sessionRef);
    if (status != noErr) {
        NSLog(@"Create decompression session failed status = %@", @(status));
    }
    ...
    AVPacket * pkt = av_packet_alloc();
    while (av_read_frame(format_ctx, pkt) >= 0) {
        if (pkt->stream_index == video_index) {
            CMBlockBufferRef blockBuffer = NULL;
            // Create a BlockBuffer backed directly by the AVPacket data. The NALUs in the
            // AVPacket must be in AVCC format (4-byte length prefixes); an Annex B stream
            // must be converted first -- see the sketch after this method.
            OSStatus status = CMBlockBufferCreateWithMemoryBlock(kCFAllocatorDefault, pkt->data, pkt->size, kCFAllocatorNull, NULL, 0, pkt->size, 0, &blockBuffer);
            if (status != kCMBlockBufferNoErr) {
                NSLog(@"Create BlockBuffer failed status = %@", @(status));
            }
            const size_t sampleSize = pkt->size;
            CMSampleBufferRef sampleBuffer = NULL;
            status = CMSampleBufferCreate(kCFAllocatorDefault, blockBuffer, true, NULL, NULL, formatDescRef, 1, 0, NULL, 1, &sampleSize, &sampleBuffer);
            if (status != noErr) {
                NSLog(@"SampleBuffer create failed");
            }
            VTDecodeFrameFlags flags = kVTDecodeFrame_EnableAsynchronousDecompression;
            VTDecodeInfoFlags flagOut;
            status = VTDecompressionSessionDecodeFrame(sessionRef, sampleBuffer, flags, NULL, &flagOut);
            if (status == noErr) {
                VTDecompressionSessionWaitForAsynchronousFrames(sessionRef);
            }
            CFRelease(blockBuffer);
            CFRelease(sampleBuffer);
        }
        av_packet_unref(pkt); // the BlockBuffer borrows pkt->data, so unref only after decoding is done
    }
}
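If the input is an Annex B stream (a raw .h264 file or MPEG-TS) rather than MP4/MOV, the start codes have to be rewritten as 4-byte big-endian lengths before handing the packet to VideoToolbox. A minimal, hand-rolled sketch (simplified: it ignores trailing zero bytes inside the stream and assumes dst is large enough; AVCC output can be slightly larger than the input when 3-byte start codes are used):

// Sketch: rewrite Annex B start codes (00 00 01 / 00 00 00 01) as 4-byte NALU lengths (AVCC)
static size_t annexb_to_avcc(const uint8_t * src, size_t size, uint8_t * dst) {
    size_t out = 0, i = 0;
    while (i + 3 < size) {
        if (!(src[i] == 0 && src[i + 1] == 0 && src[i + 2] == 1)) { i++; continue; }
        size_t nal_start = i + 3; // first byte after the 00 00 01 start code
        size_t nal_end = nal_start;
        // the NALU ends at the next start code, or at the end of the buffer
        while (nal_end + 2 < size && !(src[nal_end] == 0 && src[nal_end + 1] == 0 && src[nal_end + 2] == 1))
            nal_end++;
        if (nal_end + 2 >= size)
            nal_end = size;   // last NALU runs to the end of the buffer
        else if (src[nal_end - 1] == 0)
            nal_end--;        // the start code we hit was the 4-byte form 00 00 00 01
        uint32_t len = (uint32_t)(nal_end - nal_start);
        dst[out++] = len >> 24; dst[out++] = len >> 16; dst[out++] = len >> 8; dst[out++] = len;
        memcpy(dst + out, src + nal_start, len);
        out += len;
        i = nal_end;
    }
    return out; // number of AVCC bytes written
}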
// Decode output callback
void didDecompress( void *decompressionOutputRefCon, void *sourceFrameRefCon, OSStatus status, VTDecodeInfoFlags infoFlags, CVImageBufferRef imageBuffer, CMTime presentationTimeStamp, CMTime presentationDuration ) {
    if (status == noErr && imageBuffer) {
        CVPixelBufferLockBaseAddress(imageBuffer, kCVPixelBufferLock_ReadOnly); // the base address is only valid while locked
        for (int i = 0; i < CVPixelBufferGetPlaneCount(imageBuffer); i++) {
            void * baseAddress = CVPixelBufferGetBaseAddressOfPlane(imageBuffer, i);
            size_t bytesPerRow = CVPixelBufferGetBytesPerRowOfPlane(imageBuffer, i);
            size_t height = CVPixelBufferGetHeightOfPlane(imageBuffer, i);
            fwrite(baseAddress, bytesPerRow * height, 1, file);
            // Writing straight to the YUV file here is wrong in general: VideoToolbox delivers
            // frames in decode (DTS) order, so they must be reordered by PTS before being
            // written out -- see the reordering sketch after this function.
        }
        CVPixelBufferUnlockBaseAddress(imageBuffer, kCVPixelBufferLock_ReadOnly);
    }
}
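One way to do the PTS reordering mentioned above: hold a small window of decoded frames sorted by presentation time and only write out the oldest once the window exceeds the stream's B-frame depth. This is a hypothetical helper, not part of the original code, and kReorderDepth = 4 is an assumption that must cover the stream's maximum reordering depth:

// Hypothetical reordering helper: didDecompress would call this instead of fwrite,
// and the remaining entries are flushed in order once decoding finishes
#define kReorderDepth 4
static NSMutableArray<NSDictionary *> * pendingFrames;

static void emitFrameInPTSOrder(CVImageBufferRef imageBuffer, CMTime pts) {
    if (!pendingFrames) pendingFrames = [NSMutableArray array];
    // storing the buffer in the array retains it beyond the callback's lifetime
    [pendingFrames addObject:@{@"pts": @(CMTimeGetSeconds(pts)), @"buffer": (__bridge id)imageBuffer}];
    [pendingFrames sortUsingComparator:^NSComparisonResult(NSDictionary * a, NSDictionary * b) {
        return [a[@"pts"] compare:b[@"pts"]];
    }];
    while (pendingFrames.count > kReorderDepth) {
        CVImageBufferRef oldest = (__bridge CVImageBufferRef)pendingFrames.firstObject[@"buffer"];
        // write oldest's planes to the YUV file here, as didDecompress does above
        [pendingFrames removeObjectAtIndex:0];
    }
}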
Mac Capture + FFmpeg Encoding
- (void)initEncoder {
    // avformat_alloc_output_context2 allocates the context itself;
    // a separate avformat_alloc_context() call beforehand would just leak
    if (avformat_alloc_output_context2(&format_ctx, NULL, NULL, outputString.UTF8String) < 0) {
        NSLog(@"Open output path failed");
    }
    codec = avcodec_find_encoder(AV_CODEC_ID_H264);
    codec_ctx = avcodec_alloc_context3(codec);
    codec_ctx->bit_rate = 5000000;
    codec_ctx->width = 1280; // width/height of the camera capture configured through AVFoundation
    codec_ctx->height = 720;
    codec_ctx->time_base = (AVRational){1, 24}; // 24 fps video
    codec_ctx->framerate = (AVRational){24, 1};
    codec_ctx->gop_size = 10;
    codec_ctx->max_b_frames = 1;
    codec_ctx->pix_fmt = AV_PIX_FMT_NV12; // Mac and iPhone cameras deliver NV12
    codec_ctx->color_range = AVCOL_RANGE_JPEG;
    av_opt_set(codec_ctx->priv_data, "preset", "slow", 0); // libx264 option: the key is "preset", not "present"
    if (format_ctx->oformat->flags & AVFMT_GLOBALHEADER) {
        codec_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; // have the encoder put the SPS/PPS into codec_ctx->extradata
    }
    // The encoder must be opened before copying its parameters, otherwise extradata is still empty
    ret = avcodec_open2(codec_ctx, codec, NULL);
    if (ret < 0) {
        NSLog(@"Failed to open encoder");
    }
    AVStream * stream = avformat_new_stream(format_ctx, NULL); // create the video stream
    ret = avcodec_parameters_from_context(stream->codecpar, codec_ctx);
    if (ret < 0) {
        NSLog(@"Failed to copy encoder parameters to output stream 0");
    }
    stream->time_base = codec_ctx->time_base;
    av_dump_format(format_ctx, 0, outputString.UTF8String, 1);
    if (!(format_ctx->oformat->flags & AVFMT_NOFILE)) {
        ret = avio_open(&format_ctx->pb, outputString.UTF8String, AVIO_FLAG_WRITE);
        if (ret < 0) {
            NSLog(@"Could not open output file");
        }
    }
    ret = avformat_write_header(format_ctx, NULL);
    if (ret < 0) {
        NSLog(@"Error occurred when opening output file");
    }
}
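The AVFoundation capture setup is assumed rather than shown in the original post. A sketch of the video output configuration that matches the encoder settings above: 420f is full-range NV12, which corresponds to AV_PIX_FMT_NV12 plus AVCOL_RANGE_JPEG (_captureSession and the delegate queue are assumptions):

// Sketch: ask AVFoundation for full-range NV12 so the buffers match the encoder's pix_fmt/color_range
AVCaptureVideoDataOutput * videoOutput = [[AVCaptureVideoDataOutput alloc] init];
videoOutput.videoSettings = @{(id)kCVPixelBufferPixelFormatTypeKey: @(kCVPixelFormatType_420YpCbCr8BiPlanarFullRange)};
[videoOutput setSampleBufferDelegate:self queue:dispatch_get_global_queue(QOS_CLASS_USER_INITIATED, 0)];
if ([_captureSession canAddOutput:videoOutput]) {
    [_captureSession addOutput:videoOutput];
}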
// Camera capture callback
- (void)captureOutput:(AVCaptureOutput *)output didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection {
    CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
    AVFrame * frame = av_frame_alloc(); // AVFrame that will hold the pixel data
    frame->height = (int)CVPixelBufferGetHeight(imageBuffer);
    frame->width = (int)CVPixelBufferGetWidth(imageBuffer);
    frame->format = AV_PIX_FMT_NV12;
    frame->color_range = AVCOL_RANGE_JPEG;
    // Allocate the buffers behind frame->data. Before calling this, the pixel format and
    // width/height (video) or the sample format, nb_samples and channel_layout (audio)
    // must already be set.
    av_frame_get_buffer(frame, 0);
    CVPixelBufferLockBaseAddress(imageBuffer, kCVPixelBufferLock_ReadOnly);
    // Copy plane by plane, row by row: the CVPixelBuffer stride (bytesPerRow) and the
    // AVFrame stride (linesize) may use different padding, so a single memcpy of
    // bytesPerRow * height bytes could overrun the AVFrame buffer.
    for (int plane = 0; plane < 2; plane++) {
        uint8_t * src = CVPixelBufferGetBaseAddressOfPlane(imageBuffer, plane);
        size_t bytesPerRow = CVPixelBufferGetBytesPerRowOfPlane(imageBuffer, plane);
        size_t height = CVPixelBufferGetHeightOfPlane(imageBuffer, plane);
        for (size_t row = 0; row < height; row++) {
            memcpy(frame->data[plane] + row * frame->linesize[plane],
                   src + row * bytesPerRow,
                   MIN((size_t)frame->linesize[plane], bytesPerRow));
        }
    }
    CVPixelBufferUnlockBaseAddress(imageBuffer, kCVPixelBufferLock_ReadOnly);
    frame->pts = _pts++; // in codec_ctx->time_base; the encoder reorders this into pkt->pts/dts
    int ret = avcodec_send_frame(codec_ctx, frame);
    AVPacket * pkt = av_packet_alloc();
    while (ret >= 0) {
        ret = avcodec_receive_packet(codec_ctx, pkt);
        if (ret == AVERROR(EAGAIN)) {
            // The encoder needs more input before it can emit another packet
            break;
        } else if (ret == AVERROR_EOF) {
            NSLog(@"The encoder has been fully flushed, and there will be no more output packets");
            break;
        } else if (ret < 0) {
            NSLog(@"Error during encoding");
            break;
        }
        pkt->stream_index = 0;
        pkt->duration = 1; // one frame in codec_ctx->time_base, i.e. {1, 24}
        av_packet_rescale_ts(pkt, codec_ctx->time_base, format_ctx->streams[0]->time_base);
        if (av_write_frame(format_ctx, pkt) >= 0) {
            NSLog(@"Write success");
        }
    }
    av_packet_free(&pkt);
    av_frame_free(&frame);
}
- (void)endRecord {
    [_captureSession stopRunning];
    int ret = avcodec_send_frame(codec_ctx, NULL); // a NULL frame puts the encoder into draining (flush) mode
    AVPacket * pkt = av_packet_alloc();
    while (ret >= 0) {
        ret = avcodec_receive_packet(codec_ctx, pkt);
        if (ret == AVERROR_EOF) {
            NSLog(@"The encoder has been fully flushed, and there will be no more output packets");
            break;
        } else if (ret < 0) {
            NSLog(@"Error during encoding");
            break;
        }
        pkt->stream_index = 0;
        pkt->duration = 1; // one frame in codec_ctx->time_base
        av_packet_rescale_ts(pkt, codec_ctx->time_base, format_ctx->streams[0]->time_base);
        if (av_write_frame(format_ctx, pkt) >= 0) {
            NSLog(@"Write success");
        }
    }
    av_packet_free(&pkt);
    av_write_trailer(format_ctx);
    avio_closep(&format_ctx->pb);
    avcodec_free_context(&codec_ctx); // avcodec_close is deprecated; free the context instead
    avformat_free_context(format_ctx);
    format_ctx = NULL;
}