First, let's look at the properties that need to be declared and the headers to import:
#import "avformat.h"
#import "time.h"
#import "swresample.h"
#import "samplefmt.h"
#import <VideoToolbox/VideoToolbox.h>
#import "CPCameraMetaInfo.h"
@implementation CPCameraRTSPDecoder {
AVFormatContext *m_pFmtCtx;
BOOL mbAbort; // whether CPCameraRTSPDecoder has been aborted
BOOL mbEOF; // EOF = end of file; whether all packets have been read
int64_t miReadTimeUs; // timestamp of the current/ongoing read
int miProbesize; // probe size for format detection
int miMaxAnalyzeDuration; // maximum duration to analyze while probing
int mAudioIndex; // index of the audio stream in the streams array
int mVideoIndex; // index of the video stream in the streams array
AVStream *mAudioStream; // the audio stream found, kept here
AVCodecContext *mAudioCodecCtx; // the audio stream's codec context, kept here
AVStream *mVideoStream;
AVCodecContext *mVideoCodecCtx;
CPCameraMetaInfo *mMetaInfo;
struct SwrContext *m_pSwrCtx;
}
Initialize FFmpeg in the constructor
- If reading a local file, there is no need to call
avformat_network_init();
- (instancetype)initWithUrl:(NSString *)url {
if (self = [super init]) {
_url = url;
mbAbort = true;
mbEOF = false;
miProbesize = 0;
mAudioIndex = -1; // -1 means the audio stream has not been found yet
mVideoIndex = -1;
mMetaInfo = [CPCameraMetaInfo new];
self.state = CPCameraRTSPDecoderStateConecting;
// Deprecated no-ops since FFmpeg 4.0, but still required on older versions
av_register_all();
avcodec_register_all();
avformat_network_init();
// [self openMedia];
}
return self;
}
Hand the RTSP stream to FFmpeg for parsing, and verify that it meets the decoding requirements.
- (void)openMedia {
// Open the input
if (![self openUrl]) {
self.state = CPCameraRTSPDecoderStateConnectionFailed;
[self closeMedia];
return;
}
// Find the stream info
if (![self findStreamInfo]) {
self.state = CPCameraRTSPDecoderStateConnectionFailed;
[self closeMedia];
return;
}
// Open the streams
if (![self openStreams]) {
self.state = CPCameraRTSPDecoderStateConnectionFailed;
[self closeMedia];
return;
}
self.state = CPCameraRTSPDecoderStatekStateReadyToPlay;
}
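- The decoder states used above come from an enum that is not shown in this post; judging from the call sites it presumably looks something like the sketch below (names copied from usage, including the apparent "Conecting" typo; the rest is my assumption).
typedef NS_ENUM(NSInteger, CPCameraRTSPDecoderState) {
CPCameraRTSPDecoderStateConecting, // connecting to the stream
CPCameraRTSPDecoderStateConnectionFailed, // open/probe/parse failed
CPCameraRTSPDecoderStatekStateReadyToPlay, // ready to read and decode
CPCameraRTSPDecoderStatekStoppedWithError, // the read loop hit a fatal error
};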
- 1. Check whether the URL or file can be opened
- (BOOL)openUrl {
/// Open the input with avformat_open_input()
m_pFmtCtx = avformat_alloc_context();
/// Get the current time (libavutil/time.h)
int64_t time_s = av_gettime();
miReadTimeUs = time_s;
/// Set non-blocking mode
m_pFmtCtx->flags |= AVFMT_FLAG_NONBLOCK;
/// Apply the probe size, if one was configured
if (miProbesize)
{
m_pFmtCtx->probesize = miProbesize;
m_pFmtCtx->max_analyze_duration = miMaxAnalyzeDuration;
}
AVDictionary *options = NULL;
av_dict_set(&options, "rtsp_transport", "tcp", 0);
int openRet = avformat_open_input(&m_pFmtCtx, [_url UTF8String], NULL, &options);
av_dict_free(&options); // free any options the demuxer did not consume
if (openRet != 0) {
[self sendErrorMsg:@"avformat_open_input failed"];
return NO;
}
return YES;
}
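- miReadTimeUs is stamped before every blocking call, which looks like a read watchdog. FFmpeg's hook for that is AVFormatContext.interrupt_callback, set before avformat_open_input(); returning non-zero from the callback aborts the blocking read. A sketch under that assumption (the 10-second timeout and the callback wiring are mine, not from the original):
// Return non-zero to make FFmpeg abort the blocking operation in progress
static int decode_interrupt_cb(void *opaque) {
int64_t *readTimeUs = (int64_t *)opaque;
return *readTimeUs && (av_gettime() - *readTimeUs > 10 * 1000 * 1000); // 10 s
}
// In openUrl, right after avformat_alloc_context():
// m_pFmtCtx->interrupt_callback.callback = decode_interrupt_cb;
// m_pFmtCtx->interrupt_callback.opaque = &miReadTimeUs;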
- This function obtains the stream information. Some formats, such as flv or a raw h264 stream, carry no header, so after avformat_open_input() opens the file there are no parameters available and nothing can be read out of it. That is where this function comes in: it probes the file to detect the format. But if the format has no header, it can still only recover things like the codec and the width/height, not the total duration. If the total duration cannot be obtained, the whole file has to be read through once to count the total number of frames.
- (BOOL)findStreamInfo {
if (avformat_find_stream_info(m_pFmtCtx, NULL) < 0)
{
[self sendErrorMsg:@"avformat_find_stream_info faill"];
return NO;
}
NSString *des = [NSString stringWithFormat:@"nb_streams:%u duration = %lld", m_pFmtCtx->nb_streams, m_pFmtCtx->duration];
NSLog(@"%@", des);
return YES;
}
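- For debugging, FFmpeg can also print a human-readable summary of everything avformat_find_stream_info() discovered; an optional one-liner (not in the original code):
// Dump demuxer and stream info to stderr; the final 0 marks this as an input
av_dump_format(m_pFmtCtx, 0, [_url UTF8String], 0);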
- (BOOL)openStreams {
for (int i = 0; i < m_pFmtCtx->nb_streams; ++i)
{
// AVDISCARD_ALL discards all packets in the stream
// AVDISCARD_DEFAULT discards only useless packets (e.g. corrupt or zero-size data)
m_pFmtCtx->streams[i]->discard = AVDISCARD_ALL;
}
// Locate the audio and video streams
for (int i = 0; i < m_pFmtCtx->nb_streams; ++i)
{
if (AVMEDIA_TYPE_AUDIO == m_pFmtCtx->streams[i]->codecpar->codec_type &&
mAudioIndex < 0)
{
mAudioIndex = i;
}
if (AVMEDIA_TYPE_VIDEO == m_pFmtCtx->streams[i]->codecpar->codec_type &&
mVideoIndex < 0)
{
mVideoIndex = i;
}
}
// Found a video stream
if (mVideoIndex >= 0)
{
if ([self streamComponentOpen:mVideoIndex])
{
mMetaInfo.mbVideoOk = true;
} else {
// Cannot be decoded
return NO;
}
}
// Found an audio stream
if (mAudioIndex >= 0)
{
if ([self streamComponentOpen:mAudioIndex])
{
mMetaInfo.mbAudioOk = true;
} else {
// Cannot be decoded; audio is not mandatory, so continue anyway
mAudioIndex = -1;
}
}
return YES;
}
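- As an aside, FFmpeg also provides av_find_best_stream(), which can replace the manual index scan above; a hedged sketch of the equivalent lookup:
// Returns the chosen stream index, or a negative AVERROR on failure
int vIdx = av_find_best_stream(m_pFmtCtx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
int aIdx = av_find_best_stream(m_pFmtCtx, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0);
if (vIdx >= 0) mVideoIndex = vIdx;
if (aIdx >= 0) mAudioIndex = aIdx;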
- (BOOL)streamComponentOpen:(int)streamindex {
AVStream *stream = m_pFmtCtx->streams[streamindex];
AVCodecContext *codec_ctx;
AVCodec *codec;
// Find a suitable decoder
if (!(codec = avcodec_find_decoder(stream->codecpar->codec_id)))
{
// Lookup failed
NSString *err = [NSString stringWithFormat:@"avcodec_find_decoder() could not find decoder, name: %s ",avcodec_get_name(stream->codecpar->codec_id)];
[self sendErrorMsg:err];
return NO;
}
// Configure the decoder context
codec_ctx = avcodec_alloc_context3(codec);
// Copy the stream parameters into codec_ctx
int ret = avcodec_parameters_to_context(codec_ctx, stream->codecpar);
if (ret < 0)
{
[self sendErrorMsg:@"avcodec_parameters_to_context() Failed to copy codec params"];
avcodec_free_context(&codec_ctx); // don't leak the context on failure
return NO;
}
switch (stream->codecpar->codec_type)
{
case AVMEDIA_TYPE_AUDIO:
if (avcodec_open2(codec_ctx, codec, NULL))
{
[self sendErrorMsg:@"avcodec_open2() no supported decoder for this audio stream"];
avcodec_free_context(&codec_ctx);
return NO;
}
mAudioStream = stream;
mAudioCodecCtx = codec_ctx;
mMetaInfo.channels = codec_ctx->channels;
mMetaInfo.channellayout = av_get_default_channel_layout(codec_ctx->channels);
mMetaInfo.samplerate = codec_ctx->sample_rate;
// mMetaInfo.samplefmt = AV_SAMPLE_FMT_S16;
mMetaInfo.samplefmt = codec_ctx->sample_fmt;
stream->discard = AVDISCARD_DEFAULT;
mAudioCodecCtx->refcounted_frames = 1;
// Log directly: declaring an NSString inside an unbraced case is a "protected scope" error under ARC
NSLog(@"audio ok sample_rate: %d channel_layout: %lld sample_fmt: %d", mMetaInfo.samplerate, (long long)mMetaInfo.channellayout, mMetaInfo.samplefmt);
break;
case AVMEDIA_TYPE_VIDEO:
// LOGD << "video decoder = " << avcodec_get_name(stream->codecpar->codec_id);
codec_ctx->workaround_bugs = 1;
codec_ctx->lowres = 0;
if (codec_ctx->lowres > codec->max_lowres)
{
codec_ctx->lowres = codec->max_lowres;
}
// Inverse discrete cosine transform (IDCT) algorithm
codec_ctx->idct_algo = FF_IDCT_AUTO;
// Keep the default in-loop filtering (do not skip the loop filter)
codec_ctx->skip_loop_filter = AVDISCARD_DEFAULT;
codec_ctx->error_concealment = 3;
if (avcodec_open2(codec_ctx, codec, NULL))
{
[self sendErrorMsg:@"avcodec_open2() no supported decoder for this video stream"];
avcodec_free_context(&codec_ctx);
return NO;
}
mVideoStream = stream;
mVideoCodecCtx = codec_ctx;
mVideoCodecCtx->refcounted_frames = 1;
if (codec_ctx->width && codec_ctx->height)
{
mMetaInfo.width = codec_ctx->width;
mMetaInfo.height = codec_ctx->height;
}
else
{
mMetaInfo.width = codec_ctx->coded_width;
mMetaInfo.height = codec_ctx->coded_height;
}
if (!mMetaInfo.width || !mMetaInfo.height)
{
[self sendErrorMsg:@"parse video width and height failed"];
return NO;
}
mMetaInfo.m_frame_rate = (int)av_q2d(stream->r_frame_rate);
stream->discard = AVDISCARD_DEFAULT;
NSLog(@"video ok width: %d height: %d video decoder:%s", mMetaInfo.width, mMetaInfo.height, avcodec_get_name(stream->codecpar->codec_id));
break;
default:
break;
}
return YES;
}
After the initialization steps above, start reading data and decoding.
- (void)start {
if (!mbAbort)
{
return;
}
mbAbort = false;
dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
[self readData];
});
}
- (void)stop {
mbAbort = true;
}
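- One caveat the original does not address: stop writes mbAbort on the caller's thread while readData polls it from a global queue. A C11 atomic makes the flag's cross-thread visibility well-defined; a minimal sketch (the replacement ivar is hypothetical):
#import <stdatomic.h>
// In the ivar block, instead of BOOL mbAbort:
atomic_bool mbAbort;
// stop would become: atomic_store(&mbAbort, true);
// and the read loop condition: while (!atomic_load(&mbAbort)) { ... }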
- (void)readData {
while (!mbAbort)
{
AVPacket *pkt = av_packet_alloc();
pkt->flags = 0;
int ret = [self readFrame:pkt];
if (ret != CPCErrorCodekErrorNone)
{
if (ret != CPCErrorCodekErrorEOF)
{
self.state = CPCameraRTSPDecoderStatekStoppedWithError;
}
NSLog(@"rtsp流已讀完");
mbEOF = true;
av_packet_free(&pkt);
pkt = nil;
break;
}
// The decode methods below take ownership of pkt and free it themselves
if (mMetaInfo.mbAudioOk && [self isAudioPkt:pkt])
{
// av_packet_free(&pkt);
// pkt = nil;
[self audioFrameDecode:pkt];
}
else if (mMetaInfo.mbVideoOk && [self isVideoPkt:pkt])
{
[self videoFrameDecode:pkt];
}
else
{
av_packet_free(&pkt);
pkt = nil;
}
}
}
- (BOOL)isVideoPkt:(AVPacket *)pkt {
return pkt->stream_index == mVideoIndex;
}
- (BOOL)isAudioPkt:(AVPacket *)pkt {
return pkt->stream_index == mAudioIndex;
}
- (int)readFrame:(AVPacket *)pkt{
int ret = CPCErrorCodekErrorNone;
while (true)
{
// NSLog(@"讀包中");
miReadTimeUs = av_gettime();
ret = av_read_frame(m_pFmtCtx, pkt);
miReadTimeUs = 0;
if (ret < 0)
{
// Reached the end of the stream
if ((AVERROR_EOF == ret /* || avio_feof(ctx->fmt_ctx->pb)*/))
{
ret = CPCErrorCodekErrorEOF;
break;
}
if (m_pFmtCtx->pb && m_pFmtCtx->pb->error)
{
NSString *err = [NSString stringWithFormat:@"stream read error, ret: %d, error: %d",ret,m_pFmtCtx->pb->error];
[self sendErrorMsg:err];
// -1094995529 is AVERROR_INVALIDDATA; -104 is AVERROR(ECONNRESET) with Linux errno values
if (-1094995529 == ret && -104 == m_pFmtCtx->pb->error)
{
[self sendErrorMsg:@"vod read error after resume, try again"];
continue;
}
ret = CPCErrorCodekErrorStreamReadError;
break;
}
continue;
}
ret = CPCErrorCodekErrorNone;
break;
}
return ret;
}
- After each packet is read, decode it
- Video decodes into an AVFrame structure
- (void)videoFrameDecode:(AVPacket *)pkt {
int got_frame = 0;
double pts = 0.0;
AVFrame *frametmp = av_frame_alloc();
int ret = avcodec_decode_video2(mVideoCodecCtx, frametmp, &got_frame, pkt);
// log:pkt->size
if (ret >= 0 && got_frame)
{
// av_q2d converts a value in the stream's time base into seconds,
// e.g. with time_base = {1, 90000}, a pts of 180000 becomes 2.0 s
pts = av_frame_get_best_effort_timestamp(frametmp) * av_q2d(mVideoStream->time_base);
// NSLog(@"get a avframe(frametmp)");
}
av_frame_free(&frametmp);
av_packet_free(&pkt);
}
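- avcodec_decode_video2()/avcodec_decode_audio4() have been deprecated since FFmpeg 3.1 in favor of the send/receive pair. If the project moves to a newer FFmpeg, the video path above would look roughly like this sketch (not the original code; the audio path changes the same way):
int ret = avcodec_send_packet(mVideoCodecCtx, pkt);
while (ret >= 0) {
ret = avcodec_receive_frame(mVideoCodecCtx, frametmp);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) break; // needs more input / flushed
if (ret < 0) break; // a real decode error
double pts = frametmp->best_effort_timestamp * av_q2d(mVideoStream->time_base);
// hand the frame (and pts) on here, then reuse frametmp
av_frame_unref(frametmp);
}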
- (void)audioFrameDecode:(AVPacket *)pkt {
AVFrame *aframe = av_frame_alloc();
int got_frame = 0;
double pts = 0.0;
int len1 = 0;
len1 = avcodec_decode_audio4(mAudioCodecCtx, aframe, &got_frame, pkt);
if (len1 < 0)
{
// LOGD << "audio decode failed, get another packet, len1: " << len1;
char errbuf[256];
const char *errbuf_ptr = errbuf;
if (av_strerror(len1, errbuf, sizeof(errbuf)) < 0)
{
errbuf_ptr = strerror(AVUNERROR(len1));
}
// LOGD << errbuf_ptr;
// LOGD << "audio decode failed, get another packet, len1: " << len1;
}
else
{
if (got_frame <= 0)
{
av_frame_free(&aframe);
aframe = nil;
av_packet_free(&pkt); // also free the packet on this early return
// LOGD << "can not decode a packet, try forward";
return;
}
else
{
pts = av_frame_get_best_effort_timestamp(aframe) * av_q2d(mAudioStream->time_base);
// Decoding is done at this point; resample here if needed
CPCameraAudioFrame *audioFrame = [self resample:aframe]; // hand audioFrame on to playback
av_frame_free(&aframe); // free the frame once it has been consumed
aframe = nil;
}
}
av_packet_free(&pkt);
}
- (CPCameraAudioFrame *)resample:(AVFrame *)aframe
{
int64_t channellayout = mMetaInfo.channels > 1 ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO;
// Destination format corresponds to FFmpeg's AV_SAMPLE_FMT_S16
int destSampleRate = mMetaInfo.samplerate;
int destChannels = mMetaInfo.channels;
int mDestSampleFmt = AV_SAMPLE_FMT_S16; // == 1
if (m_pSwrCtx == nil)
{
// Note: the source layout is assumed to match the destination layout here
m_pSwrCtx = swr_alloc_set_opts(NULL, channellayout,
(enum AVSampleFormat)mDestSampleFmt,
mMetaInfo.samplerate,
channellayout,
(enum AVSampleFormat)aframe->format,
mMetaInfo.samplerate,
0,
NULL);
if (!m_pSwrCtx || swr_init(m_pSwrCtx) < 0)
{
// LOGE << "swr_ctx create failed, try again";
swr_free(&m_pSwrCtx);
m_pSwrCtx = NULL;
return nil;
}
// LOGD << "generate swrcontext";
}
if (m_pSwrCtx != nil)
{
// int64_t stime = rtc::TimeMillis();
// The source data here is the PCM already decoded into aframe->data
// int linesize = 0;
int dst_nb_samples = av_rescale_rnd(aframe->nb_samples, destSampleRate, aframe->sample_rate, AV_ROUND_UP);
int destSampleSize = av_samples_get_buffer_size(NULL,
destChannels, 1,
(enum AVSampleFormat)mDestSampleFmt,
1);
// int destSampleSize = m_iDestChannels * av_get_bytes_per_sample((AVSampleFormat)m_eSampleFmt);
int destFrameSize = destSampleSize * dst_nb_samples;
// uint8_t *destdata = new uint8_t[destFrameSize];
uint8_t *destdata = (uint8_t *)malloc(destFrameSize);
memset(destdata, 0, destFrameSize);
// LOGD << "dest sample nb = " << dst_nb_samples
// << " destsamplesize = " << destFrameSize;
// Resample; swr_convert returns the number of samples converted per channel
int ret = swr_convert(m_pSwrCtx, &destdata, dst_nb_samples, (const uint8_t **)aframe->data, aframe->nb_samples);
if (ret > 0)
{
// ret is samples per channel, so the byte count is ret * destSampleSize
CPCameraAudioFrame *ocaudioframe = [[CPCameraAudioFrame alloc] initWithDesData:destdata dataSize:ret * destSampleSize sampleformate:mDestSampleFmt channels:destChannels samplerate:destSampleRate pts:0.0f];
free(destdata); // CPCameraAudioFrame copies the bytes, so free the scratch buffer
return ocaudioframe;
}
free(destdata); // also free it when the conversion fails
}
return nil;
}
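- When the source and destination sample rates differ, the FFmpeg documentation recommends accounting for samples still buffered inside the resampler. The dst_nb_samples computation above would then become (a sketch; in this post both rates are mMetaInfo.samplerate, so the original shortcut works):
int dst_nb_samples = (int)av_rescale_rnd(swr_get_delay(m_pSwrCtx, aframe->sample_rate) + aframe->nb_samples,
destSampleRate, aframe->sample_rate, AV_ROUND_UP);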
@interface CPCameraAudioFrame : NSObject
@property (nonatomic, assign)uint8_t *desFrame;
@property (nonatomic, assign)int m_iSampleSizeBytes;
- (instancetype)initWithDesData:(uint8_t *)desFrame
dataSize:(int)datasize
sampleformate:(int)sampleformate
channels:(int)channels
samplerate:(int)samplerate
pts:(double)pts;
@end
@implementation CPCameraAudioFrame
- (instancetype)initWithDesData:(uint8_t *)desFrame dataSize:(int)datasize
sampleformate:(int)sampleformate
channels:(int)channels
samplerate:(int)samplerate
pts:(double)pts {
if (self = [super init]) {
_m_iSampleSizeBytes = av_get_bytes_per_sample((enum AVSampleFormat)sampleformate);
_desFrame = (uint8_t *)malloc(datasize);
memcpy(_desFrame, desFrame, datasize);
}
return self;
}
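- _desFrame is malloc'd in the initializer, so the class also needs a matching free; a minimal dealloc (not shown in the original):
- (void)dealloc {
if (_desFrame) {
free(_desFrame);
_desFrame = NULL;
}
}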