一、概要说明
网络流播放时，IJKPlayer 新增录制 mp4 功能。因为是实时流，需要逐帧编码；当然也可以在播放器未解码前逐帧写入到文件，但是不能确定读到的是什么格式的流，直接逐帧写入到文件恐怕不能使用，故采用对解码后的流逐帧编码再写入的方式录制视频。
二、概要设计
三、音视频数据回调
要实现这个功能，重点修改的代码在 ffplayer 里面。
1、在视频解码处把视频原始数据及 pts 回调出去：在 queue_picture 方法中，搜索 SDL_VoutFillFrameYUVOverlay，在其下面添加：
??// FIXME: set swscale options
if?(SDL_VoutFillFrameYUVOverlay(vp->bmp, src_frame) <?0) {
????av_log(NULL, AV_LOG_FATAL,?"Cannot initialize the conversion context\n");
????exit(1);
}
//新增代碼
if?(ffp -> videotoolbox) {
??// TODO edit
????ffp_pixelbuffer_lock(ffp);
????ffp->szt_pixelbuffer = SDL_VoutOverlayVideoToolBox_GetCVPixelBufferRef(vp->bmp);?// picture->opaque;
????if?(s_pixel_buff_callback)
//??????????????? ffp->stat.vdps
????????s_pixel_buff_callback(ffp->inject_opaque, ffp->szt_pixelbuffer, vp->pts*1000*1000);
????ffp_pixelbuffer_unlock(ffp);
????if?(!ffp->szt_pixelbuffer) {
????????ALOGE("nil pixelBuffer in overlay\n");
????}
}
2、在音频解码后回调音频 PCM 数据。ffmpeg 回调出来的音频是 32 位 float，但实际测试发现在 iOS 中不能正常编码，故先把格式转为 16 位 int 型。在 int audio_thread(void *arg) 的 do{}while 循环中，增加如下代码：
if?(is->swr_ctx) {
?uint8_t *targetData[1];
?int?len =?5760*2;?//這里這么寫定死長度可能會有問題
?targetData[0] = (uint8_t *)malloc(len);
?int?size = audio_swr_resampling_audio(is->swr_ctx, context, frame, targetData);
?tb = (AVRational){1, frame->sample_rate};
?ffp_pcm_lock(ffp);
?int?pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
?uint8_t *audioData = frame->data[0];
?int?audioDataSize = frame->linesize[0];
?if?(s_pcm_callback) {
?s_pcm_callback(ffp->inject_opaque, pts, frame->format, frame->nb_samples, frame->channels, frame->sample_rate, frame->channel_layout,?1, size, targetData[0]);
?//? ? ? ? ? ? ? ? s_pcm_callback(audioData, audioDataSize, ffp_get_current_position_l(ffp)*1000);
?}
?free(targetData[0]);
?ffp_pcm_unlock(ffp);
?}
在解码前对音频做格式转换的初始化：
AVCodecContext *context = NULL;
?for?(int?i =?0; i < is->ic->nb_streams; i++) {
?// 對照輸入流創(chuàng)建輸出流通道
?AVStream *in_stream = is->ic->streams[i];
?if?(in_stream->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
?context = in_stream->codec;
?}
?}
?audio_swr_resampling_audio_init(&is->swr_ctx, context);
32 位 float 型音频转为 16 位 int 型音频的代码：
void?audio_swr_resampling_audio_destory(SwrContext **swr_ctx){
?if(*swr_ctx){
?swr_free(swr_ctx);
?*swr_ctx = NULL;
?}
}
void?audio_swr_resampling_audio_init(SwrContext **swr_ctx,AVCodecContext *codec){
?if(codec->sample_fmt == AV_SAMPLE_FMT_S16 || codec->sample_fmt == AV_SAMPLE_FMT_S32 ||codec->sample_fmt == AV_SAMPLE_FMT_U8){
?av_log(NULL, AV_LOG_ERROR,?"codec->sample_fmt:%d",codec->sample_fmt);
?if(*swr_ctx){
?swr_free(swr_ctx);
?*swr_ctx = NULL;
?}
?return;
?}
?if(*swr_ctx){
?swr_free(swr_ctx);
?}
?*swr_ctx = swr_alloc();
?if(!*swr_ctx){
?av_log(NULL, AV_LOG_ERROR,?"swr_alloc failed");
?return;
?}
?/* set options */
?av_opt_set_int(*swr_ctx,?"in_channel_layout",? ? codec->channel_layout,?0);
?av_opt_set_int(*swr_ctx,?"out_channel_layout",? ? codec->channel_layout,?0);
?av_opt_set_int(*swr_ctx,?"in_sample_rate", ? ? ? codec->sample_rate,?0);
?av_opt_set_int(*swr_ctx,?"out_sample_rate", ? ? ? codec->sample_rate,?0);
?av_opt_set_sample_fmt(*swr_ctx,?"in_sample_fmt", codec->sample_fmt,?0);
?av_opt_set_sample_fmt(*swr_ctx,?"out_sample_fmt", AV_SAMPLE_FMT_S16,?0);// AV_SAMPLE_FMT_S16
?/* initialize the resampling context */
?int?ret =?0;
?if?((ret = swr_init(*swr_ctx)) <?0) {
?av_log(NULL, AV_LOG_ERROR,?"Failed to initialize the resampling context\n");
?if(*swr_ctx){
?swr_free(swr_ctx);
?*swr_ctx = NULL;
?}
?return;
?}
}
int?audio_swr_resampling_audio(SwrContext *swr_ctx,AVCodecContext *codec,AVFrame *audioFrame,uint8_t **targetData){
?uint8_t **extendedData = audioFrame->data;
?int?len = swr_convert(swr_ctx,targetData,audioFrame->nb_samples,extendedData,audioFrame->nb_samples);
?if(len <?0){
?av_log(NULL, AV_LOG_ERROR,?"error swr_convert");
?goto?end;
?}
?int?dst_bufsize = len * codec->channels * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16);
//? ? av_log(NULL, AV_LOG_INFO, " dst_bufsize:%d",dst_bufsize);
?return?dst_bufsize;
?end:
?return?-1;
}
至此，音视频原始数据已经回调到上层了。
四、逐帧编码及写入
视频回调到上层已经是 CVPixelBufferRef 格式了，无需另外处理，直接使用 AVAssetWriter 写入到文件即可：
CMTime tm = CMTimeMake(pts,?1000?*?1000);
BOOL success = [self.pixelBuffAdptor appendPixelBuffer:pixelBuff withPresentationTime:tm];
NSLog(@"___%d", success);
但是音频回调到上层是 uint8_t *，需要多一步音频数据处理，转为 iOS 中适用的 CMSampleBufferRef，代码如下：
IJKWeakHolder *weakHolder = (__bridge IJKWeakHolder*)opaque;
?TCLIJKPlayer *mpc = weakHolder.object;
?if?(!mpc) {
?return?0;
?}
?uint8_t *targetData = malloc(data_lineSize + mpc->lastCount);
?memcpy(targetData, mpc->lastData, mpc->lastCount);
?memcpy(targetData + mpc->lastCount, data, data_lineSize);
?int?len =?2048;
?if(data_lineSize + mpc->lastCount < len) {
?memcpy(mpc->lastData + mpc->lastCount, data, data_lineSize);
?mpc->lastCount = data_lineSize + mpc->lastCount;
?free(targetData);
?return?0;
?}
?for?(int?i =?0; i <= (data_lineSize + mpc->lastCount)/len; i++) {
?if?((i+1)*len > (data_lineSize + mpc->lastCount)) {
?mpc->lastCount = (data_lineSize + mpc->lastCount) - i*len;
?memcpy(mpc->lastData, targetData + i*len, mpc->lastCount);
?}?else?{
?uint8_t *dst = malloc(len);
?memcpy(dst, targetData + i*len, len);
?CMSampleBufferRef buffer = createAudioSample(dst, len, pts, channels, sample_rate);
?free(dst);
?if?(mpc.delegate && [mpc.delegate respondsToSelector:@selector(onAudioSampleBuffer:)]) {
?id buffRef = (__bridge id _Nullable)buffer;
?dispatch_async(dispatch_get_main_queue(), ^{
?[mpc.delegate onAudioSampleBuffer:(__bridge CMSampleBufferRef)(buffRef)];
?});
?}
?CFRelease(buffer);
?}
?}
?free(targetData);
/// Wraps a chunk of packed signed 16-bit interleaved PCM in a
/// CMSampleBufferRef suitable for -[AVAssetWriterInput appendSampleBuffer:].
/// The caller owns the returned buffer and must CFRelease it; returns NULL
/// on any CoreMedia failure.
///
/// FIXME(review): `pts` is currently unused — every buffer gets a zero
/// presentation timestamp (kCMTimeZero below). Confirm whether the asset
/// writer needs real timestamps here for A/V sync.
static CMSampleBufferRef createAudioSample(void *audioData, UInt32 len, double pts, int channels, int sample_rate)
{
    AudioBufferList audioBufferList;
    audioBufferList.mNumberBuffers = 1;
    audioBufferList.mBuffers[0].mNumberChannels = channels;
    audioBufferList.mBuffers[0].mDataByteSize = len;
    audioBufferList.mBuffers[0].mData = audioData;

    // Packed, signed, 16-bit interleaved PCM — matches the swr output.
    AudioStreamBasicDescription asbd;
    asbd.mSampleRate = sample_rate;
    asbd.mFormatID = kAudioFormatLinearPCM;
    asbd.mFormatFlags = kLinearPCMFormatFlagIsPacked | kLinearPCMFormatFlagIsSignedInteger;
    asbd.mChannelsPerFrame = channels;
    asbd.mBitsPerChannel = 16;
    asbd.mFramesPerPacket = 1;
    asbd.mBytesPerFrame = asbd.mBitsPerChannel / 8 * asbd.mChannelsPerFrame;
    asbd.mBytesPerPacket = asbd.mBytesPerFrame * asbd.mFramesPerPacket;
    asbd.mReserved = 0;

    // Create a fresh format description per call. The original declared this
    // `static`, yet released it every call — a static would dangle across
    // invocations and is not thread-safe.
    CMFormatDescriptionRef format = NULL;
    OSStatus error = CMAudioFormatDescriptionCreate(kCFAllocatorDefault, &asbd, 0, NULL, 0, NULL, NULL, &format);
    if (error || format == NULL) {
        // Guard: the original CFRelease'd `format` unconditionally, which
        // crashes (CFRelease(NULL)) when creation fails.
        NSLog(@"CMAudioFormatDescriptionCreate returned error: %ld", (long)error);
        return NULL;
    }

    // One frame per sample; numSamples = bytes / (2 bytes * channels).
    CMSampleTimingInfo timing = {CMTimeMake(1, sample_rate), kCMTimeZero, kCMTimeInvalid};
    CMSampleBufferRef buff = NULL;
    error = CMSampleBufferCreate(kCFAllocatorDefault, NULL, false, NULL, NULL, format,
                                 (CMItemCount)(len / (2 * channels)), 1, &timing, 0, NULL, &buff);
    CFRelease(format);
    if (error) {
        NSLog(@"CMSampleBufferCreate returned error: %ld", (long)error);
        return NULL;
    }

    error = CMSampleBufferSetDataBufferFromAudioBufferList(buff, kCFAllocatorDefault, kCFAllocatorDefault, 0, &audioBufferList);
    if (error) {
        NSLog(@"CMSampleBufferSetDataBufferFromAudioBufferList returned error: %ld", (long)error);
        CFRelease(buff);  // original leaked the half-built buffer here
        return NULL;
    }
    return buff;
}
再使用 AVAssetWriter 写入到 mp4：
BOOL success = [self.assetWriterAudioInput appendSampleBuffer:sampleBuffer];
NSLog(@"-------%d", success);
if?(!success) {
????@synchronized(self) {
????????[self stopWrite:nil];
????????[self destroyWrite];
????}
}