ijkplayer的音頻解碼是不支持硬解的,音頻播放使用的API是OpenSL ES或AudioTrack。
- AudioTrack
AudioTrack是專門為Android應用提供的java API。
使用AudioTrack API來輸出音頻就需要把音頻數據從java層拷貝到native層。而OpenSL ES API是Android NDK提供的native接口,它可以在native層直接獲取和處理數據,因此為了提高效率,應該使用OpenSL ES API。通過如下java接口設置音頻輸出API:
ijkMediaPlayer.setOption(IjkMediaPlayer.OPT_CATEGORY_PLAYER, "opensles", 0);
Ijkplayer使用jni4android來為AudioTrack的java API自動生成JNI native代碼。
我們盡量選擇底層的代碼來進行研究,因此本篇文章梳理一遍OpenSL ES API在ijkplayer中的使用。
源碼分析
創(chuàng)建播放器音頻輸出對象
調用如下函數(shù)生成音頻輸出對象:
SDL_Aout *SDL_AoutAndroid_CreateForOpenSLES()
創(chuàng)建并初始化Audio Engine:
//創(chuàng)建
SLObjectItf slObject = NULL;
ret = slCreateEngine(&slObject, 0, NULL, 0, NULL, NULL);
CHECK_OPENSL_ERROR(ret, "%s: slCreateEngine() failed", __func__);
opaque->slObject = slObject;
//初始化
ret = (*slObject)->Realize(slObject, SL_BOOLEAN_FALSE);
CHECK_OPENSL_ERROR(ret, "%s: slObject->Realize() failed", __func__);
//獲取SLEngine接口對象slEngine
SLEngineItf slEngine = NULL;
ret = (*slObject)->GetInterface(slObject, SL_IID_ENGINE, &slEngine);
CHECK_OPENSL_ERROR(ret, "%s: slObject->GetInterface() failed", __func__);
opaque->slEngine = slEngine;
打開音頻輸出設備:
//使用slEngine打開輸出設備
SLObjectItf slOutputMixObject = NULL;
const SLInterfaceID ids1[] = {SL_IID_VOLUME};
const SLboolean req1[] = {SL_BOOLEAN_FALSE};
ret = (*slEngine)->CreateOutputMix(slEngine, &slOutputMixObject, 1, ids1, req1);
CHECK_OPENSL_ERROR(ret, "%s: slEngine->CreateOutputMix() failed", __func__);
opaque->slOutputMixObject = slOutputMixObject;
//初始化
ret = (*slOutputMixObject)->Realize(slOutputMixObject, SL_BOOLEAN_FALSE);
CHECK_OPENSL_ERROR(ret, "%s: slOutputMixObject->Realize() failed", __func__);
將上述創建的OpenSL ES相關對象保存到SDL_Aout_Opaque中。
設置播放器音頻輸出對象的回調函數(shù):
aout->free_l = aout_free_l;
aout->opaque_class = &g_opensles_class;
aout->open_audio = aout_open_audio;
aout->pause_audio = aout_pause_audio;
aout->flush_audio = aout_flush_audio;
aout->close_audio = aout_close_audio;
aout->set_volume = aout_set_volume;
aout->func_get_latency_seconds = aout_get_latency_seconds;
配置并創(chuàng)建音頻播放器
通過如下函數(shù)進行:
static int aout_open_audio(SDL_Aout *aout, const SDL_AudioSpec *desired, SDL_AudioSpec *obtained)
-
配置數(shù)據(jù)源
SLDataLocator_AndroidSimpleBufferQueue loc_bufq = { SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, OPENSLES_BUFFERS }; SLDataFormat_PCM *format_pcm = &opaque->format_pcm; format_pcm->formatType = SL_DATAFORMAT_PCM; format_pcm->numChannels = desired->channels; format_pcm->samplesPerSec = desired->freq * 1000; // milli Hz format_pcm->bitsPerSample = SL_PCMSAMPLEFORMAT_FIXED_16; format_pcm->containerSize = SL_PCMSAMPLEFORMAT_FIXED_16; switch (desired->channels) { case 2: format_pcm->channelMask = SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT; break; case 1: format_pcm->channelMask = SL_SPEAKER_FRONT_CENTER; break; default: ALOGE("%s, invalid channel %d", __func__, desired->channels); goto fail; } format_pcm->endianness = SL_BYTEORDER_LITTLEENDIAN; SLDataSource audio_source = {&loc_bufq, format_pcm};
-
配置數(shù)據(jù)管道
SLDataLocator_OutputMix loc_outmix = { SL_DATALOCATOR_OUTPUTMIX, opaque->slOutputMixObject }; SLDataSink audio_sink = {&loc_outmix, NULL};
-
其它參數(shù)
const SLInterfaceID ids2[] = { SL_IID_ANDROIDSIMPLEBUFFERQUEUE, SL_IID_VOLUME, SL_IID_PLAY }; static const SLboolean req2[] = { SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE };
-
創(chuàng)建播放器
ret = (*slEngine)->CreateAudioPlayer(slEngine, &slPlayerObject, &audio_source, &audio_sink, sizeof(ids2) / sizeof(*ids2), ids2, req2);
-
獲取相關接口
//獲取seek和play接口 ret = (*slPlayerObject)->GetInterface(slPlayerObject, SL_IID_PLAY, &opaque->slPlayItf); CHECK_OPENSL_ERROR(ret, "%s: slPlayerObject->GetInterface(SL_IID_PLAY) failed", __func__); //音量調節(jié)接口 ret = (*slPlayerObject)->GetInterface(slPlayerObject, SL_IID_VOLUME, &opaque->slVolumeItf); CHECK_OPENSL_ERROR(ret, "%s: slPlayerObject->GetInterface(SL_IID_VOLUME) failed", __func__); //獲取音頻輸出的BufferQueue接口 ret = (*slPlayerObject)->GetInterface(slPlayerObject, SL_IID_ANDROIDSIMPLEBUFFERQUEUE, &opaque->slBufferQueueItf); CHECK_OPENSL_ERROR(ret, "%s: slPlayerObject->GetInterface(SL_IID_ANDROIDSIMPLEBUFFERQUEUE) failed", __func__);
設置回調函數(shù)
回調函數(shù)并不傳遞音頻數(shù)據(jù)遥巴,它只是告訴程序:我已經(jīng)準備好接受處理(播放)數(shù)據(jù)了千康。這時候就可以調用Enqueue向BufferQueue中插入音頻數(shù)據(jù)了。
ret = (*opaque->slBufferQueueItf)->RegisterCallback(opaque->slBufferQueueItf, aout_opensles_callback, (void*)aout);
CHECK_OPENSL_ERROR(ret, "%s: slBufferQueueItf->RegisterCallback() failed", __func__);
-
初始化其它參數(shù)
opaque->bytes_per_frame = format_pcm->numChannels * format_pcm->bitsPerSample / 8;//每一幀的bytes數(shù)铲掐,此處將一個采樣點作為一幀 opaque->milli_per_buffer = OPENSLES_BUFLEN;//一個buffer中的音頻時長拾弃,單位為milliseconds opaque->frames_per_buffer = opaque->milli_per_buffer * format_pcm->samplesPerSec / 1000000; // samplesPerSec is in milli,一個buffer中的音頻時長*每秒的樣本(幀)數(shù),得到每個音頻buffer中的幀數(shù) opaque->bytes_per_buffer = opaque->bytes_per_frame * opaque->frames_per_buffer;//最后求出每個buffer中含有的byte數(shù)目摆霉。 opaque->buffer_capacity = OPENSLES_BUFFERS * opaque->bytes_per_buffer;
此回調函數(shù)每執(zhí)行一次Dequeue會被執(zhí)行一次豪椿。
音頻數(shù)據(jù)的處理
音頻數(shù)據(jù)的處理為典型的生產(chǎn)者消費者模型,解碼線程解碼出音頻數(shù)據(jù)插入到隊列中携栋,音頻驅動程序取出數(shù)據(jù)將聲音播放出來搭盾。
audio_thread函數(shù)為音頻解碼線程主函數(shù):
static int audio_thread(void *arg){
do {
ffp_audio_statistic_l(ffp);
if ((got_frame = decoder_decode_frame(ffp, &is->auddec, frame, NULL)) < 0)//從PacketQueue中取出pakcet并進行解碼,生成一幀數(shù)據(jù)
...
if (!(af = frame_queue_peek_writable(&is->sampq)))
goto the_end;
af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
af->pos = frame->pkt_pos;
af->serial = is->auddec.pkt_serial;
af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate});
av_frame_move_ref(af->frame, frame);
frame_queue_push(&is->sampq);//將幀數(shù)據(jù)插入幀隊列 FrameQueue
}
aout_thread_n 為音頻輸出線程主函數(shù):
static int aout_thread_n(SDL_Aout *aout){
...
SDL_LockMutex(opaque->wakeup_mutex);
//如果沒有退出播放&&(當前播放器狀態(tài)為暫停||插入音頻BufferQueue中的數(shù)據(jù)條數(shù)大于OPENSLES_BUFFERS)
if (!opaque->abort_request && (opaque->pause_on || slState.count >= OPENSLES_BUFFERS)) {
//不知道為什么if下面又加了一層while??
while (!opaque->abort_request && (opaque->pause_on || slState.count >= OPENSLES_BUFFERS)) {
//如果此時為非暫停狀態(tài)婉支,將播放器狀態(tài)置為PLAYING
if (!opaque->pause_on) {
(*slPlayItf)->SetPlayState(slPlayItf, SL_PLAYSTATE_PLAYING);
}
//如果暫脱煊纾或者隊列中數(shù)據(jù)過多,這里都會等待一個條件變量向挖,并將過期時間置為1秒蝌以,應該是防止BufferQueue中的數(shù)據(jù)不再快速增加
SDL_CondWaitTimeout(opaque->wakeup_cond, opaque->wakeup_mutex, 1000);
SLresult slRet = (*slBufferQueueItf)->GetState(slBufferQueueItf, &slState);
if (slRet != SL_RESULT_SUCCESS) {
ALOGE("%s: slBufferQueueItf->GetState() failed\n", __func__);
SDL_UnlockMutex(opaque->wakeup_mutex);
}
//暫停播放
if (opaque->pause_on)
(*slPlayItf)->SetPlayState(slPlayItf, SL_PLAYSTATE_PAUSED);
}
//恢復播放
if (!opaque->abort_request && !opaque->pause_on) {
(*slPlayItf)->SetPlayState(slPlayItf, SL_PLAYSTATE_PLAYING);
}
}
...
next_buffer = opaque->buffer + next_buffer_index * bytes_per_buffer;
next_buffer_index = (next_buffer_index + 1) % OPENSLES_BUFFERS;
//調用回調函數(shù)生成插入到BufferQueue中的數(shù)據(jù)
audio_cblk(userdata, next_buffer, bytes_per_buffer);
//如果需要刷新BufferQueue數(shù)據(jù),則清除數(shù)據(jù)何之,何時需要清理數(shù)據(jù)饼灿??解釋在下面
if (opaque->need_flush) {
(*slBufferQueueItf)->Clear(slBufferQueueItf);
opaque->need_flush = false;
}
//不知道為什么會判斷兩次帝美?碍彭?
if (opaque->need_flush) {
ALOGE("flush");
opaque->need_flush = 0;
(*slBufferQueueItf)->Clear(slBufferQueueItf);
} else {
//最終將數(shù)據(jù)插入到BufferQueue中。
slRet = (*slBufferQueueItf)->Enqueue(slBufferQueueItf, next_buffer, bytes_per_buffer);
...
}
以下是為條件變量opaque->wakeup_cond發送signal的幾個函數,目的是讓輸出線程快速響應:
static void aout_opensles_callback(SLAndroidSimpleBufferQueueItf caller, void *pContext)
static void aout_close_audio(SDL_Aout *aout)
static void aout_pause_audio(SDL_Aout *aout, int pause_on)
static void aout_flush_audio(SDL_Aout *aout)
static void aout_set_volume(SDL_Aout *aout, float left_volume, float right_volume)
第一個為音頻播放器的BufferQueue設置的回調函數,每從隊列中取出一條數據就執行一次。這個可以理解:隊列中取出一條數據,立刻喚醒線程Enqueue數據。
第二個為關閉音頻播放器的時候調用的函數,立馬退出線程。
第三個為暫停/播放音頻播放器的函數,馬上設置播放器狀態。
第四個為清空BufferQueue時調用的函數,立刻喚醒線程Enqueue數據。
第五個為設置音量的函數,馬上設置音量。
通過調用如下函數(shù)生成插入到BufferQueue中的數(shù)據(jù) :
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len){
...
if (is->audio_buf_index >= is->audio_buf_size) {
//如果buffer中沒有數(shù)據(jù)了彰导,生成新數(shù)據(jù)。
audio_size = audio_decode_frame(ffp);
...
if (!is->muted && is->audio_buf && is->audio_volume == SDL_MIX_MAXVOLUME)
//直接拷貝到stream
memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
else {
memset(stream, 0, len1);
if (!is->muted && is->audio_buf)
//進行音量調整和混音
SDL_MixAudio(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1, is->audio_volume);
}
}
生成新數據的函數不是對音頻數據進行解碼,而是對幀數據進行了二次處理:對音頻進行了必要的重采樣或者變速變調。
static int audio_decode_frame(FFPlayer *ffp){
...
//重采樣
len2 = swr_convert(is->swr_ctx, out, out_count, in, af->frame->nb_samples);
...
//音頻變速變調
int ret_len = ijk_soundtouch_translate(is->handle, is->audio_new_buf, (float)(ffp->pf_playback_rate), (float)(1.0f/ffp->pf_playback_rate),
resampled_data_size / 2, bytes_per_sample, is->audio_tgt.channels, af->frame->sample_rate);
...
//最后將數(shù)據(jù)保存到audio_buf中
is->audio_buf = (uint8_t*)is->audio_new_buf;
...
}
主要分析變速播放框架的實現細節,不分析sonic以及soundtouch變速算法。在我的《sonic變速變調原理》一文中會詳細講解基於基音周期來實現變速變調的原理。
1.變速入口分析
從jni層的_setPropertyFloat函數(shù)
/* JNI entry point: forwards a float property write (e.g. playback rate,
 * volume) from the Java IjkMediaPlayer to the native player instance. */
static void ijkMediaPlayer_setPropertyFloat(JNIEnv *env, jobject thiz, jint id, jfloat value)
{
IjkMediaPlayer *mp = jni_get_media_player(env, thiz);
// bails out via LABEL_RETURN if the native player handle is already gone
JNI_CHECK_GOTO(mp, env, NULL, "mpjni: setPropertyFloat: null mp", LABEL_RETURN);
ijkmp_set_property_float(mp, id, value);
LABEL_RETURN:
// jni_get_media_player took a reference; release it on every path
ijkmp_dec_ref_p(&mp);
return;
}
到ff_ffplay.c中的ffp_set_property_float函數(shù)來設置速度
/* Dispatch a float property write to the matching handler.
 * Unknown property ids are silently ignored. */
void ffp_set_property_float(FFPlayer *ffp, int id, float value)
{
    if (id == FFP_PROP_FLOAT_PLAYBACK_RATE) {
        ffp_set_playback_rate(ffp, value);
    } else if (id == FFP_PROP_FLOAT_PLAYBACK_VOLUME) {
        ffp_set_playback_volume(ffp, value);
    }
}
跟蹤ffp_set_playback_rate函數,我們可以看到這裡主要把速度變量設置給了ffp->pf_playback_rate,並把ffp->pf_playback_rate_changed置1。
/* Record the requested playback rate on the player and raise the
 * "changed" flag so the audio path picks it up on the next callback.
 * A NULL player is a no-op. */
void ffp_set_playback_rate(FFPlayer *ffp, float rate)
{
    if (ffp) {
        av_log(ffp, AV_LOG_INFO, "Playback rate: %f\n", rate);
        ffp->pf_playback_rate = rate;
        ffp->pf_playback_rate_changed = 1;
    }
}
2.音頻變速實現(xiàn)
跟蹤這兩個變量,我們可以看到在audio_decode_frame中,我們新增了音頻的變速變調算法來處理音頻變速。
#if defined(__ANDROID__)
if (ffp->soundtouch_enable && ffp->pf_playback_rate != 1.0f && !is->abort_request) {
av_fast_malloc(&is->audio_new_buf, &is->audio_new_buf_size, out_size * translate_time);
for (int i = 0; i < (resampled_data_size / 2); i++)
{
is->audio_new_buf[i] = (is->audio_buf1[i * 2] | (is->audio_buf1[i * 2 + 1] << 8));
}
int ret_len = ijk_soundtouch_translate(is->handle, is->audio_new_buf, (float)(ffp->pf_playback_rate), (float)(1.0f/ffp->pf_playback_rate),
resampled_data_size / 2, bytes_per_sample, is->audio_tgt.channels, af->frame->sample_rate);
if (ret_len > 0) {
is->audio_buf = (uint8_t*)is->audio_new_buf;
resampled_data_size = ret_len;
} else {
translate_time++;
goto reload;
}
} else if (ffp->sonic_enabled && ffp->pf_playback_rate != 1.0f && !is->abort_request) {
av_fast_malloc(&is->audio_new_buf, &is->audio_new_buf_size, out_size * translate_time * 2);
for (int i = 0; i < (resampled_data_size / 2); i++)
{
is->audio_new_buf[i] = (is->audio_buf1[i * 2] | (is->audio_buf1[i * 2 + 1] << 8));
}
int ret_len = sonicStream_translate(is->sonic_handle,is->audio_new_buf,ffp->pf_playback_rate,
(float)(1.0f/ffp->pf_playback_rate),is->audio_tgt.channels, af->frame->sample_rate,resampled_data_size / 2,bytes_per_sample);
if (ret_len > 0) {
is->audio_buf = (uint8_t*)is->audio_new_buf;
resampled_data_size = ret_len;
} else {
translate_time++;
goto reload;
}
}
#endif
sonic變速是我加的。下面在音頻回調sdl_audio_callback函數中:
if (ffp->pf_playback_rate_changed) {
ffp->pf_playback_rate_changed = 0;
#if defined(__ANDROID__)
if (!ffp->soundtouch_enable && !ffp->sonic_enabled) {
SDL_AoutSetPlaybackRate(ffp->aout, ffp->pf_playback_rate);
}
#else
SDL_AoutSetPlaybackRate(ffp->aout, ffp->pf_playback_rate);
#endif
}
if (ffp->pf_playback_volume_changed) {
ffp->pf_playback_volume_changed = 0;
SDL_AoutSetPlaybackVolume(ffp->aout, ffp->pf_playback_volume);
}
上面主要處理對ffp->pf_playback_rate_changed的判斷:如果沒有開啟soundtouch變速或sonic變速,直接走audiotrack的變速來處理。在android6.0系統之下,audiotrack的變速存在變調問題。
從上面我們可以看到這裡處理了音頻的變速,但是沒有處理只有視頻流的視頻(也就是沒有音頻流的情況)。
3.視頻變速實現(xiàn)
ijk中默認選擇音頻流作為時間基準,我們看下視頻是如何來做變速的。視頻上很容易就可以做到倍速播放:一般的視頻格式都是每秒固定的幀數,按比例跳幀就可以了。
/* Called repeatedly to display each frame: computes when the next picture
 * is due, drops late frames, advances subtitles, displays the picture, and
 * (custom patch) drives the external clock speed so that audio-less streams
 * can change playback rate. remaining_time is updated with how long the
 * caller may sleep before the next refresh. */
static void video_refresh(FFPlayer *opaque, double *remaining_time)
{
FFPlayer *ffp = opaque;
VideoState *is = ffp->is;
double time;
Frame *sp, *sp2;
// for realtime streams synced to the external clock, keep its speed near realtime
if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
check_external_clock_speed(is);
// audio-only visualization path: redraw at rdftspeed intervals
if (!ffp->display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
time = av_gettime_relative() / 1000000.0;
if (is->force_refresh || is->last_vis_time + ffp->rdftspeed < time) {
video_display2(ffp);
is->last_vis_time = time;
}
*remaining_time = FFMIN(*remaining_time, is->last_vis_time + ffp->rdftspeed - time);
}
if (is->video_st) {
retry:
// add by hxk: support playback-rate change for video-only streams
if(!is->audio_st && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK) {
if(ffp->pf_playback_rate != 1.0f){
change_external_clock_speed(is,ffp->pf_playback_rate);
}
}
//add end
if (frame_queue_nb_remaining(&is->pictq) == 0) {
// nothing to do, no picture to display in the queue
} else {
double last_duration, duration, delay;
Frame *vp, *lastvp;
/* dequeue the picture */
lastvp = frame_queue_peek_last(&is->pictq);
vp = frame_queue_peek(&is->pictq);
// frame belongs to a stale packet serial (e.g. after seek): discard it
if (vp->serial != is->videoq.serial) {
frame_queue_next(&is->pictq);
goto retry;
}
// serial changed between frames: restart the frame timer
if (lastvp->serial != vp->serial)
is->frame_timer = av_gettime_relative() / 1000000.0;
if (is->paused)
goto display;
/* compute nominal last_duration */
last_duration = vp_duration(is, lastvp, vp);
delay = compute_target_delay(ffp, last_duration, is);
time= av_gettime_relative()/1000000.0;
if (isnan(is->frame_timer) || time < is->frame_timer)
is->frame_timer = time;
// not yet time for the next frame: report how long we may sleep
if (time < is->frame_timer + delay) {
*remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
goto display;
}
is->frame_timer += delay;
// resync the timer if we fell too far behind
if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
is->frame_timer = time;
SDL_LockMutex(is->pictq.mutex);
if (!isnan(vp->pts))
update_video_pts(is, vp->pts, vp->pos, vp->serial);
SDL_UnlockMutex(is->pictq.mutex);
// frame dropping: skip this frame if the following one is already due
if (frame_queue_nb_remaining(&is->pictq) > 1) {
Frame *nextvp = frame_queue_peek_next(&is->pictq);
duration = vp_duration(is, vp, nextvp);
if(!is->step && (ffp->framedrop > 0 || (ffp->framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration) {
frame_queue_next(&is->pictq);
goto retry;
}
}
// drop subtitles whose display window has passed relative to the video clock
if (is->subtitle_st) {
while (frame_queue_nb_remaining(&is->subpq) > 0) {
sp = frame_queue_peek(&is->subpq);
if (frame_queue_nb_remaining(&is->subpq) > 1)
sp2 = frame_queue_peek_next(&is->subpq);
else
sp2 = NULL;
if (sp->serial != is->subtitleq.serial
|| (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
|| (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
{
if (sp->uploaded) {
ffp_notify_msg4(ffp, FFP_MSG_TIMED_TEXT, 0, 0, "", 1);
}
frame_queue_next(&is->subpq);
} else {
break;
}
}
}
frame_queue_next(&is->pictq);
is->force_refresh = 1;
SDL_LockMutex(ffp->is->play_mutex);
// single-step mode: after showing one frame, drop back to paused
if (is->step) {
is->step = 0;
if (!is->paused)
stream_update_pause_l(ffp);
}
SDL_UnlockMutex(ffp->is->play_mutex);
}
display:
/* display picture */
if (!ffp->display_disable && is->force_refresh && is->show_mode == SHOW_MODE_VIDEO && is->pictq.rindex_shown)
video_display2(ffp);
}
is->force_refresh = 0;
// periodic (>= 30ms) status line: queue sizes, A-V diff, frame drops
if (ffp->show_status) {
static int64_t last_time;
int64_t cur_time;
int aqsize, vqsize, sqsize __unused;
double av_diff;
cur_time = av_gettime_relative();
if (!last_time || (cur_time - last_time) >= 30000) {
aqsize = 0;
vqsize = 0;
sqsize = 0;
if (is->audio_st)
aqsize = is->audioq.size;
if (is->video_st)
vqsize = is->videoq.size;
#ifdef FFP_MERGE
if (is->subtitle_st)
sqsize = is->subtitleq.size;
#else
sqsize = 0;
#endif
av_diff = 0;
if (is->audio_st && is->video_st)
av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
else if (is->video_st)
av_diff = get_master_clock(is) - get_clock(&is->vidclk);
else if (is->audio_st)
av_diff = get_master_clock(is) - get_clock(&is->audclk);
av_log(NULL, AV_LOG_INFO,
"%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
get_master_clock(is),
(is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
av_diff,
is->frame_drops_early + is->frame_drops_late,
aqsize / 1024,
vqsize / 1024,
sqsize,
is->video_st ? is->viddec.avctx->pts_correction_num_faulty_dts : 0,
is->video_st ? is->viddec.avctx->pts_correction_num_faulty_pts : 0);
fflush(stdout);
last_time = cur_time;
}
}
}
其實這里就是視頻變速的實現(xiàn),也是視頻同步音頻的實現(xiàn)位仁,來實現(xiàn)視頻變速柑贞。
3.1.沒有音頻的視頻變速
通過前面的分析,我們可以知道ijk沒有處理只有視頻流(沒有音頻流)的視頻變速。當沒有音頻流的時候,在read_thread函數中:
/* open the streams */
if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
stream_component_open(ffp, st_index[AVMEDIA_TYPE_AUDIO]);
} else {//如果沒有音頻流
ffp->av_sync_type = AV_SYNC_VIDEO_MASTER;
is->av_sync_type = ffp->av_sync_type;
}
我們可以看到如果沒有音頻流,ijk中選擇視頻流作為時間基準。個人覺得還是外部時鐘作為時間基準比較好,比較準。
那么我們可以如何修改呢?
3.1.1.修改沒有音頻的視頻的同步方式為外部時鐘
/* open the streams */
if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
stream_component_open(ffp, st_index[AVMEDIA_TYPE_AUDIO]);
} else {
ffp->av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
is->av_sync_type = ffp->av_sync_type;
}
當沒有音頻流的時候,選擇外部時鐘作為時間基準。
3.1.2.修改外部時鐘變速
修改video_refresh函數(shù)
/* called to display each frame */
/* Modified video_refresh: same refresh/drop/display loop as upstream ffplay,
 * plus the hxk patch that adjusts the external clock speed so video-only
 * streams (no audio) honor ffp->pf_playback_rate. */
static void video_refresh(FFPlayer *opaque, double *remaining_time)
{
FFPlayer *ffp = opaque;
VideoState *is = ffp->is;
double time;
Frame *sp, *sp2;
// for realtime streams synced to the external clock, keep its speed near realtime
if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
check_external_clock_speed(is);
// audio-only visualization path: redraw at rdftspeed intervals
if (!ffp->display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
time = av_gettime_relative() / 1000000.0;
if (is->force_refresh || is->last_vis_time + ffp->rdftspeed < time) {
video_display2(ffp);
is->last_vis_time = time;
}
*remaining_time = FFMIN(*remaining_time, is->last_vis_time + ffp->rdftspeed - time);
}
if (is->video_st) {
retry:
// when there is no audio stream but there is video, and the master clock is the external clock
//add by hxk,support only video change speed
if(!is->audio_st && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK) {
// if the playback rate is not 1.0, change the external clock speed accordingly
if(ffp->pf_playback_rate != 1.0f){
change_external_clock_speed(is,ffp->pf_playback_rate);
}
}
//add end
if (frame_queue_nb_remaining(&is->pictq) == 0) {
// nothing to do, no picture to display in the queue
} else {
double last_duration, duration, delay;
Frame *vp, *lastvp;
/* dequeue the picture */
lastvp = frame_queue_peek_last(&is->pictq);
vp = frame_queue_peek(&is->pictq);
// frame belongs to a stale packet serial (e.g. after seek): discard it
if (vp->serial != is->videoq.serial) {
frame_queue_next(&is->pictq);
goto retry;
}
// serial changed between frames: restart the frame timer
if (lastvp->serial != vp->serial)
is->frame_timer = av_gettime_relative() / 1000000.0;
if (is->paused)
goto display;
/* compute nominal last_duration */
last_duration = vp_duration(is, lastvp, vp);
delay = compute_target_delay(ffp, last_duration, is);
time= av_gettime_relative()/1000000.0;
if (isnan(is->frame_timer) || time < is->frame_timer)
is->frame_timer = time;
// not yet time for the next frame: report how long we may sleep
if (time < is->frame_timer + delay) {
*remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
goto display;
}
is->frame_timer += delay;
// resync the timer if we fell too far behind
if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
is->frame_timer = time;
SDL_LockMutex(is->pictq.mutex);
if (!isnan(vp->pts))
update_video_pts(is, vp->pts, vp->pos, vp->serial);
SDL_UnlockMutex(is->pictq.mutex);
// frame dropping: skip this frame if the following one is already due
if (frame_queue_nb_remaining(&is->pictq) > 1) {
Frame *nextvp = frame_queue_peek_next(&is->pictq);
duration = vp_duration(is, vp, nextvp);
if(!is->step && (ffp->framedrop > 0 || (ffp->framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration) {
frame_queue_next(&is->pictq);
goto retry;
}
}
// drop subtitles whose display window has passed relative to the video clock
if (is->subtitle_st) {
while (frame_queue_nb_remaining(&is->subpq) > 0) {
sp = frame_queue_peek(&is->subpq);
if (frame_queue_nb_remaining(&is->subpq) > 1)
sp2 = frame_queue_peek_next(&is->subpq);
else
sp2 = NULL;
if (sp->serial != is->subtitleq.serial
|| (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
|| (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
{
if (sp->uploaded) {
ffp_notify_msg4(ffp, FFP_MSG_TIMED_TEXT, 0, 0, "", 1);
}
frame_queue_next(&is->subpq);
} else {
break;
}
}
}
frame_queue_next(&is->pictq);
is->force_refresh = 1;
SDL_LockMutex(ffp->is->play_mutex);
// single-step mode: after showing one frame, drop back to paused
if (is->step) {
is->step = 0;
if (!is->paused)
stream_update_pause_l(ffp);
}
SDL_UnlockMutex(ffp->is->play_mutex);
}
display:
/* display picture */
if (!ffp->display_disable && is->force_refresh && is->show_mode == SHOW_MODE_VIDEO && is->pictq.rindex_shown)
video_display2(ffp);
}
is->force_refresh = 0;
// periodic (>= 30ms) status line: queue sizes, A-V diff, frame drops
if (ffp->show_status) {
static int64_t last_time;
int64_t cur_time;
int aqsize, vqsize, sqsize __unused;
double av_diff;
cur_time = av_gettime_relative();
if (!last_time || (cur_time - last_time) >= 30000) {
aqsize = 0;
vqsize = 0;
sqsize = 0;
if (is->audio_st)
aqsize = is->audioq.size;
if (is->video_st)
vqsize = is->videoq.size;
#ifdef FFP_MERGE
if (is->subtitle_st)
sqsize = is->subtitleq.size;
#else
sqsize = 0;
#endif
av_diff = 0;
if (is->audio_st && is->video_st)
av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
else if (is->video_st)
av_diff = get_master_clock(is) - get_clock(&is->vidclk);
else if (is->audio_st)
av_diff = get_master_clock(is) - get_clock(&is->audclk);
av_log(NULL, AV_LOG_INFO,
"%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
get_master_clock(is),
(is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
av_diff,
is->frame_drops_early + is->frame_drops_late,
aqsize / 1024,
vqsize / 1024,
sqsize,
is->video_st ? is->viddec.avctx->pts_correction_num_faulty_dts : 0,
is->video_st ? is->viddec.avctx->pts_correction_num_faulty_pts : 0);
fflush(stdout);
last_time = cur_time;
}
}
}
新增外部時鐘變速函數(shù)。
//add by hxk
/* Nudge the external clock toward the requested playback speed.
 * The original expression (1.0 - speed) / fabs(1.0 - speed) is exactly +/-1,
 * so this sets the clock one EXTERNAL_CLOCK_SPEED_STEP on the near side of
 * the target: below it when speeding up, above it when slowing down.
 * speed == 1.0 leaves the clock untouched. */
static void change_external_clock_speed(VideoState *is, float speed) {
    if (speed == 1.0f)
        return;
    double direction = (speed > 1.0f) ? -1.0 : 1.0;
    set_clock_speed(&is->extclk, speed + EXTERNAL_CLOCK_SPEED_STEP * direction);
}
//add end
這樣我們就可以解決沒有音頻的視頻流的變速問題了。當然如果還是選擇視頻流作為時間基準的話,我們也可以通過修改刷幀速度來實現視頻變速。
最后一個比較讓人困惑的問題是何時才會清理BufferQueue掠抬,看一下清理的命令是在何時發(fā)出的:
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
...
if (is->auddec.pkt_serial != is->audioq.serial) {
is->audio_buf_index = is->audio_buf_size;
memset(stream, 0, len);
// stream += len;
// len = 0;
SDL_AoutFlushAudio(ffp->aout);
break;
}
...
}
它是在音頻輸出線程中獲取即將插入到BufferQueue的音頻數據、調用回調函數時發出的,發出的條件如上所示。其中pkt_serial為從PacketQueue隊列中取出的需要解碼的packet的serial,serial為當前PacketQueue隊列的serial。也就是說,如果兩者不等,就需要清理BufferQueue。這裡的serial是要保證前後數據包的連續性:例如發生了Seek,數據不連續,就需要清理舊數據。
注:在播放器中的VideoState成員中,audioq和解碼成員auddec中的queue是同一個隊列。
decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread);
結束語
筆者從頭到尾把和音頻輸出相關的、自認為重要的源碼做了一些解釋和記錄,有些細節沒有去深入研究,以後有時間慢慢學習。