Project repository: https://github.com/deepsadness/SDLCmakeDemo
Series overview
- SDL2: Android Studio + CMakeLists integration
- A simple FFmpeg + SDL2 player on Android
- A brief analysis of SDL2 on Android (VideoSubSystem)
- A brief analysis of SDL2 on Android (AudioSubSystem)
Analysis of the Android-side source
Initialization on the Android side is essentially the same as in the video subsystem, so we only take a quick look here.
SDLActivity calls SDL.setupJNI(), which in turn calls SDLAudioManager.nativeSetupJNI() to set up the JNI bindings. In SDL_android.c, nativeSetupJNI caches the method IDs that native code later uses to call back into Java:
/* Audio initialization -- called before SDL_main() to initialize JNI bindings */
JNIEXPORT void JNICALL SDL_JAVA_AUDIO_INTERFACE(nativeSetupJNI)(JNIEnv* mEnv, jclass cls)
{
__android_log_print(ANDROID_LOG_VERBOSE, "SDL", "AUDIO nativeSetupJNI()");
Android_JNI_SetupThread();
mAudioManagerClass = (jclass)((*mEnv)->NewGlobalRef(mEnv, cls));
midAudioOpen = (*mEnv)->GetStaticMethodID(mEnv, mAudioManagerClass,
"audioOpen", "(IIII)[I");
midAudioWriteByteBuffer = (*mEnv)->GetStaticMethodID(mEnv, mAudioManagerClass,
"audioWriteByteBuffer", "([B)V");
midAudioWriteShortBuffer = (*mEnv)->GetStaticMethodID(mEnv, mAudioManagerClass,
"audioWriteShortBuffer", "([S)V");
midAudioWriteFloatBuffer = (*mEnv)->GetStaticMethodID(mEnv, mAudioManagerClass,
"audioWriteFloatBuffer", "([F)V");
midAudioClose = (*mEnv)->GetStaticMethodID(mEnv, mAudioManagerClass,
"audioClose", "()V");
midCaptureOpen = (*mEnv)->GetStaticMethodID(mEnv, mAudioManagerClass,
"captureOpen", "(IIII)[I");
midCaptureReadByteBuffer = (*mEnv)->GetStaticMethodID(mEnv, mAudioManagerClass,
"captureReadByteBuffer", "([BZ)I");
midCaptureReadShortBuffer = (*mEnv)->GetStaticMethodID(mEnv, mAudioManagerClass,
"captureReadShortBuffer", "([SZ)I");
midCaptureReadFloatBuffer = (*mEnv)->GetStaticMethodID(mEnv, mAudioManagerClass,
"captureReadFloatBuffer", "([FZ)I");
midCaptureClose = (*mEnv)->GetStaticMethodID(mEnv, mAudioManagerClass,
"captureClose", "()V");
if (!midAudioOpen || !midAudioWriteByteBuffer || !midAudioWriteShortBuffer || !midAudioWriteFloatBuffer || !midAudioClose ||
!midCaptureOpen || !midCaptureReadByteBuffer || !midCaptureReadShortBuffer || !midCaptureReadFloatBuffer || !midCaptureClose) {
__android_log_print(ANDROID_LOG_WARN, "SDL", "Missing some Java callbacks, do you have the latest version of SDLAudioManager.java?");
}
checkJNIReady();
}
Let's take a quick look at what the playback methods actually do.
audioOpen
SDLAudioManager->audioOpen
Parameters:
- sampleRate: the sample rate, in sample frames per second
- is16Bit: whether to sample at 16-bit depth (otherwise 8-bit)
- isStereo: whether to use two channels (stereo) instead of mono
- desiredFrames: the desired buffer size, in audio frames
public static int audioOpen(int sampleRate, boolean is16Bit, boolean isStereo, int desiredFrames) {
int channelConfig = isStereo ? AudioFormat.CHANNEL_CONFIGURATION_STEREO : AudioFormat.CHANNEL_CONFIGURATION_MONO;
int audioFormat = is16Bit ? AudioFormat.ENCODING_PCM_16BIT : AudioFormat.ENCODING_PCM_8BIT;
// Compute the size of one frame in bytes
int frameSize = (isStereo ? 2 : 1) * (is16Bit ? 2 : 1);
Log.v(TAG, "SDL audio: wanted " + (isStereo ? "stereo" : "mono") + " " + (is16Bit ? "16-bit" : "8-bit") + " " + (sampleRate / 1000f) + "kHz, " + desiredFrames + " frames buffer");
// Size the buffer from the desired frame count.
// It must be at least the frame count implied by getMinBufferSize().
// Let the user pick a larger buffer if they really want -- but ye
// gods they probably shouldn't, the minimums are horrifyingly high
// latency already
desiredFrames = Math.max(desiredFrames, (AudioTrack.getMinBufferSize(sampleRate, channelConfig, audioFormat) + frameSize - 1) / frameSize);
if (mAudioTrack == null) {
// Create and open the AudioTrack
mAudioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, sampleRate,
channelConfig, audioFormat, desiredFrames * frameSize, AudioTrack.MODE_STREAM);
// Instantiating AudioTrack can "succeed" without an exception and the track may still be invalid
// Ref: https://android.googlesource.com/platform/frameworks/base/+/refs/heads/master/media/java/android/media/AudioTrack.java
// Ref: http://developer.android.com/reference/android/media/AudioTrack.html#getState()
if (mAudioTrack.getState() != AudioTrack.STATE_INITIALIZED) {
Log.e(TAG, "Failed during initialization of Audio Track");
mAudioTrack = null;
return -1;
}
mAudioTrack.play();
}
Log.v(TAG, "SDL audio: got " + ((mAudioTrack.getChannelCount() >= 2) ? "stereo" : "mono") + " " + ((mAudioTrack.getAudioFormat() == AudioFormat.ENCODING_PCM_16BIT) ? "16-bit" : "8-bit") + " " + (mAudioTrack.getSampleRate() / 1000f) + "kHz, " + desiredFrames + " frames buffer");
return 0;
}
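The desiredFrames clamp above is a ceiling division: getMinBufferSize() returns a byte count, which is converted to whole frames and used as a lower bound. A worked sketch in C (the minimum buffer size is a hypothetical value):
/* Worked example of the clamp above (hypothetical numbers). */
int frameSize     = 2 /* stereo */ * 2 /* 16-bit */;    /* 4 bytes per frame */
int minBufferSize = 15050;    /* pretend getMinBufferSize() returned this */
int minFrames     = (minBufferSize + frameSize - 1) / frameSize;  /* 3763 */
int desiredFrames = 1024;
if (desiredFrames < minFrames)
    desiredFrames = minFrames;    /* same as Math.max(...) in the Java code */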
audioWriteXXXBuffer
These methods write buffers of different sample formats to the AudioTrack. They are all alike, so we look at just one:
public static void audioWriteByteBuffer(byte[] buffer) {
if (mAudioTrack == null) {
Log.e(TAG, "Attempted to make audio call with uninitialized audio!");
return;
}
for (int i = 0; i < buffer.length; ) {
// Write the buffer to the AudioTrack; this is what actually plays it
int result = mAudioTrack.write(buffer, i, buffer.length - i);
if (result > 0) {
i += result;
} else if (result == 0) {
try {
Thread.sleep(1);
} catch(InterruptedException e) {
// Nom nom
}
} else {
Log.w(TAG, "SDL audio: error return from write(byte)");
return;
}
}
}
audioClose
Stops and releases the AudioTrack:
public static void audioClose() {
if (mAudioTrack != null) {
mAudioTrack.stop();
mAudioTrack.release();
mAudioTrack = null;
}
}
The SDL flow
SDL initialization:
SDL_Init(): initialize SDL.
SDL_OpenAudio(): open the audio device.
SDL_PauseAudio(): start playback.
SDL render loop:
SDL repeatedly invokes the callback, which must feed properly formatted data to the device (see the sketch below).
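A minimal playback skeleton showing this flow; fill_audio is a placeholder callback that just writes silence where a real player would decode PCM:
/* Minimal sketch of the SDL audio flow described above. */
#include "SDL.h"

static void fill_audio(void *userdata, Uint8 *stream, int len)
{
    SDL_memset(stream, 0, len);  /* silence; a real player decodes into stream */
}

int play_silence(void)
{
    SDL_AudioSpec wanted;

    if (SDL_Init(SDL_INIT_AUDIO) < 0)
        return -1;

    SDL_zero(wanted);
    wanted.freq = 44100;            /* sample frames per second */
    wanted.format = AUDIO_S16SYS;   /* signed 16-bit, native byte order */
    wanted.channels = 2;            /* stereo */
    wanted.samples = 1024;          /* buffer size in sample frames */
    wanted.callback = fill_audio;

    if (SDL_OpenAudio(&wanted, NULL) < 0)  /* NULL obtained: SDL converts for us */
        return -1;

    SDL_PauseAudio(0);   /* unpause: the audio thread starts calling fill_audio */
    SDL_Delay(5000);     /* "play" for five seconds */
    SDL_CloseAudio();
    SDL_Quit();
    return 0;
}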
Initialization: SDL_AudioInit
We already saw this during video initialization: calling SDL_Init with the SDL_INIT_AUDIO flag ends up in SDL_AudioInit, which initializes the audio subsystem.
The Android driver's init function, ANDROIDAUDIO_Init, is straightforward: it assigns the JNI-backed function pointers to the impl and sets the capability flags.
static int
ANDROIDAUDIO_Init(SDL_AudioDriverImpl * impl)
{
/* Set the function pointers */
impl->OpenDevice = ANDROIDAUDIO_OpenDevice;
impl->PlayDevice = ANDROIDAUDIO_PlayDevice;
impl->GetDeviceBuf = ANDROIDAUDIO_GetDeviceBuf;
impl->CloseDevice = ANDROIDAUDIO_CloseDevice;
impl->CaptureFromDevice = ANDROIDAUDIO_CaptureFromDevice;
impl->FlushCapture = ANDROIDAUDIO_FlushCapture;
/* and the capabilities */
impl->HasCaptureSupport = SDL_TRUE;
impl->OnlyHasDefaultOutputDevice = 1;
impl->OnlyHasDefaultCaptureDevice = 1;
return 1; /* this audio target is available. */
}
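SDL_AudioInit finds this driver through a bootstrap table; the entry in SDL2's SDL_androidaudio.c looks roughly like this (quoted from the SDL2 source from memory, so treat the exact fields as a sketch):
/* name, description, init function, demand_only flag */
AudioBootStrap ANDROIDAUDIO_bootstrap = {
    "android", "SDL Android audio driver", ANDROIDAUDIO_Init, 0
};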
As the JNI initialization above showed, the Java implementations of all these callbacks live in SDLAudioManager.
Opening the audio device: SDL_OpenAudio
- Signature
extern DECLSPEC int SDLCALL SDL_OpenAudio(SDL_AudioSpec * desired,
SDL_AudioSpec * obtained);
SDL_OpenAudio takes two parameters: the audio format we want (desired) and the format we actually end up with (obtained).
SDL_AudioSpec is the struct SDL uses to describe an audio format:
typedef struct SDL_AudioSpec
{
int freq; /**< DSP frequency -- samples per second */
SDL_AudioFormat format; /**< Audio data format */
Uint8 channels; /**< Number of channels: 1 mono, 2 stereo */
Uint8 silence; /**< Audio buffer silence value (calculated) */
Uint16 samples; /**< Audio buffer size in sample FRAMES (total samples divided by channel count) */
Uint16 padding; /**< Necessary for some compile environments */
Uint32 size; /**< Audio buffer size in bytes (calculated) */
SDL_AudioCallback callback; /**< Callback that feeds the audio device (NULL to use SDL_QueueAudio()). */
void *userdata; /**< Userdata passed to callback (ignored for NULL callbacks). */
} SDL_AudioSpec;
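The silence and size fields are marked "calculated" because SDL fills them in itself; this is roughly what SDL_CalculateAudioSpec() in SDL_audio.c does (a sketch, with the silence handling abbreviated):
spec->silence = (spec->format == AUDIO_U8) ? 0x80 : 0x00;
spec->size  = SDL_AUDIO_BITSIZE(spec->format) / 8;  /* bytes per sample */
spec->size *= spec->channels;                       /* bytes per frame  */
spec->size *= spec->samples;                        /* bytes per buffer */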
Following the function-call diagram:
- The resulting (obtained) spec is given a preliminary initialization.
- An SDL_AudioDevice is allocated and initialized.
- The device's state flags are initialized:
SDL_AtomicSet(&device->shutdown, 0); /* not shutting down (just in case) */
SDL_AtomicSet(&device->paused, 1);   /* start out paused */
SDL_AtomicSet(&device->enabled, 1);  /* device is enabled */
- current_audio.impl.OpenDevice is called to actually open the audio device.
On Android this ends up in SDLAudioManager's open() method:
public static int[] audioOpen(int sampleRate, int audioFormat, int desiredChannels, int desiredFrames) {
return open(false, sampleRate, audioFormat, desiredChannels, desiredFrames);
}
protected static int[] open(boolean isCapture, int sampleRate, int audioFormat, int desiredChannels, int desiredFrames) {
int channelConfig;
int sampleSize;
int frameSize;
Log.v(TAG, "Opening " + (isCapture ? "capture" : "playback") + ", requested " + desiredFrames + " frames of " + desiredChannels + " channel " + getAudioFormatString(audioFormat) + " audio at " + sampleRate + " Hz");
/* On older devices let's use known good settings */
if (Build.VERSION.SDK_INT < 21) {
if (desiredChannels > 2) {
desiredChannels = 2;
}
if (sampleRate < 8000) {
sampleRate = 8000;
} else if (sampleRate > 48000) {
sampleRate = 48000;
}
}
if (audioFormat == AudioFormat.ENCODING_PCM_FLOAT) {
int minSDKVersion = (isCapture ? 23 : 21);
if (Build.VERSION.SDK_INT < minSDKVersion) {
audioFormat = AudioFormat.ENCODING_PCM_16BIT;
}
}
switch (audioFormat)
{
case AudioFormat.ENCODING_PCM_8BIT:
sampleSize = 1;
break;
case AudioFormat.ENCODING_PCM_16BIT:
sampleSize = 2;
break;
case AudioFormat.ENCODING_PCM_FLOAT:
sampleSize = 4;
break;
default:
Log.v(TAG, "Requested format " + audioFormat + ", getting ENCODING_PCM_16BIT");
audioFormat = AudioFormat.ENCODING_PCM_16BIT;
sampleSize = 2;
break;
}
if (isCapture) {
switch (desiredChannels) {
case 1:
channelConfig = AudioFormat.CHANNEL_IN_MONO;
break;
case 2:
channelConfig = AudioFormat.CHANNEL_IN_STEREO;
break;
default:
Log.v(TAG, "Requested " + desiredChannels + " channels, getting stereo");
desiredChannels = 2;
channelConfig = AudioFormat.CHANNEL_IN_STEREO;
break;
}
} else {
switch (desiredChannels) {
case 1:
channelConfig = AudioFormat.CHANNEL_OUT_MONO;
break;
case 2:
channelConfig = AudioFormat.CHANNEL_OUT_STEREO;
break;
case 3:
channelConfig = AudioFormat.CHANNEL_OUT_STEREO | AudioFormat.CHANNEL_OUT_FRONT_CENTER;
break;
case 4:
channelConfig = AudioFormat.CHANNEL_OUT_QUAD;
break;
case 5:
channelConfig = AudioFormat.CHANNEL_OUT_QUAD | AudioFormat.CHANNEL_OUT_FRONT_CENTER;
break;
case 6:
channelConfig = AudioFormat.CHANNEL_OUT_5POINT1;
break;
case 7:
channelConfig = AudioFormat.CHANNEL_OUT_5POINT1 | AudioFormat.CHANNEL_OUT_BACK_CENTER;
break;
case 8:
if (Build.VERSION.SDK_INT >= 23) {
channelConfig = AudioFormat.CHANNEL_OUT_7POINT1_SURROUND;
} else {
Log.v(TAG, "Requested " + desiredChannels + " channels, getting 5.1 surround");
desiredChannels = 6;
channelConfig = AudioFormat.CHANNEL_OUT_5POINT1;
}
break;
default:
Log.v(TAG, "Requested " + desiredChannels + " channels, getting stereo");
desiredChannels = 2;
channelConfig = AudioFormat.CHANNEL_OUT_STEREO;
break;
}
/*
Log.v(TAG, "Speaker configuration (and order of channels):");
if ((channelConfig & 0x00000004) != 0) {
Log.v(TAG, " CHANNEL_OUT_FRONT_LEFT");
}
if ((channelConfig & 0x00000008) != 0) {
Log.v(TAG, " CHANNEL_OUT_FRONT_RIGHT");
}
if ((channelConfig & 0x00000010) != 0) {
Log.v(TAG, " CHANNEL_OUT_FRONT_CENTER");
}
if ((channelConfig & 0x00000020) != 0) {
Log.v(TAG, " CHANNEL_OUT_LOW_FREQUENCY");
}
if ((channelConfig & 0x00000040) != 0) {
Log.v(TAG, " CHANNEL_OUT_BACK_LEFT");
}
if ((channelConfig & 0x00000080) != 0) {
Log.v(TAG, " CHANNEL_OUT_BACK_RIGHT");
}
if ((channelConfig & 0x00000100) != 0) {
Log.v(TAG, " CHANNEL_OUT_FRONT_LEFT_OF_CENTER");
}
if ((channelConfig & 0x00000200) != 0) {
Log.v(TAG, " CHANNEL_OUT_FRONT_RIGHT_OF_CENTER");
}
if ((channelConfig & 0x00000400) != 0) {
Log.v(TAG, " CHANNEL_OUT_BACK_CENTER");
}
if ((channelConfig & 0x00000800) != 0) {
Log.v(TAG, " CHANNEL_OUT_SIDE_LEFT");
}
if ((channelConfig & 0x00001000) != 0) {
Log.v(TAG, " CHANNEL_OUT_SIDE_RIGHT");
}
*/
}
frameSize = (sampleSize * desiredChannels);
// Let the user pick a larger buffer if they really want -- but ye
// gods they probably shouldn't, the minimums are horrifyingly high
// latency already
int minBufferSize;
if (isCapture) {
minBufferSize = AudioRecord.getMinBufferSize(sampleRate, channelConfig, audioFormat);
} else {
minBufferSize = AudioTrack.getMinBufferSize(sampleRate, channelConfig, audioFormat);
}
desiredFrames = Math.max(desiredFrames, (minBufferSize + frameSize - 1) / frameSize);
int[] results = new int[4];
if (isCapture) {
if (mAudioRecord == null) {
mAudioRecord = new AudioRecord(MediaRecorder.AudioSource.DEFAULT, sampleRate,
channelConfig, audioFormat, desiredFrames * frameSize);
// see notes about AudioTrack state in audioOpen(), above. Probably also applies here.
if (mAudioRecord.getState() != AudioRecord.STATE_INITIALIZED) {
Log.e(TAG, "Failed during initialization of AudioRecord");
mAudioRecord.release();
mAudioRecord = null;
return null;
}
mAudioRecord.startRecording();
}
results[0] = mAudioRecord.getSampleRate();
results[1] = mAudioRecord.getAudioFormat();
results[2] = mAudioRecord.getChannelCount();
results[3] = desiredFrames;
} else {
if (mAudioTrack == null) {
mAudioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, sampleRate, channelConfig, audioFormat, desiredFrames * frameSize, AudioTrack.MODE_STREAM);
// Instantiating AudioTrack can "succeed" without an exception and the track may still be invalid
// Ref: https://android.googlesource.com/platform/frameworks/base/+/refs/heads/master/media/java/android/media/AudioTrack.java
// Ref: http://developer.android.com/reference/android/media/AudioTrack.html#getState()
if (mAudioTrack.getState() != AudioTrack.STATE_INITIALIZED) {
/* Try again, with safer values */
Log.e(TAG, "Failed during initialization of Audio Track");
mAudioTrack.release();
mAudioTrack = null;
return null;
}
mAudioTrack.play();
}
results[0] = mAudioTrack.getSampleRate();
results[1] = mAudioTrack.getAudioFormat();
results[2] = mAudioTrack.getChannelCount();
results[3] = desiredFrames;
}
Log.v(TAG, "Opening " + (isCapture ? "capture" : "playback") + ", got " + results[3] + " frames of " + results[2] + " channel " + getAudioFormatString(results[1]) + " audio at " + results[0] + " Hz");
return results;
}
This method takes the parameters SDL passed over, initializes the AudioTrack used for playback, and returns the parameters the AudioTrack was actually configured with back to SDL, which uses them to fill in the obtained spec.
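On the native side this is handled by Android_JNI_OpenAudioDevice in SDL_android.c; a condensed sketch of how it invokes audioOpen through the cached method ID and copies the returned int[4] back into the SDL_AudioSpec (error handling and the mapping of r[1] back to an SDL format are omitted):
jintArray result = (jintArray)(*env)->CallStaticObjectMethod(
        env, mAudioManagerClass, midAudioOpen,
        spec->freq, audioformat, spec->channels, spec->samples);
jint *r = (*env)->GetIntArrayElements(env, result, NULL);
spec->freq     = r[0];            /* sample rate actually granted   */
/* r[1]: Android encoding, mapped back to an SDL_AudioFormat        */
spec->channels = (Uint8)  r[2];   /* channel count actually granted */
spec->samples  = (Uint16) r[3];   /* buffer size in frames          */
(*env)->ReleaseIntArrayElements(env, result, r, JNI_ABORT);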
- Next, the returned spec is compared against the requested one and copied back; if the format actually changed, an SDL_AudioStream is created to convert between the two.
if (build_stream) {
if (iscapture) {
device->stream = SDL_NewAudioStream(device->spec.format,
device->spec.channels, device->spec.freq,
obtained->format, obtained->channels, obtained->freq);
} else {
device->stream = SDL_NewAudioStream(obtained->format, obtained->channels,
obtained->freq, device->spec.format,
device->spec.channels, device->spec.freq);
}
if (!device->stream) {
close_audio_device(device);
return 0;
}
}
The SDL_AudioStream struct:
struct _SDL_AudioStream
{
SDL_AudioCVT cvt_before_resampling;
SDL_AudioCVT cvt_after_resampling;
SDL_DataQueue *queue;
SDL_bool first_run;
Uint8 *staging_buffer;
int staging_buffer_size;
int staging_buffer_filled;
Uint8 *work_buffer_base; /* maybe unaligned pointer from SDL_realloc(). */
int work_buffer_len;
int src_sample_frame_size;
SDL_AudioFormat src_format;
Uint8 src_channels;
int src_rate;
int dst_sample_frame_size;
SDL_AudioFormat dst_format;
Uint8 dst_channels;
int dst_rate;
double rate_incr;
Uint8 pre_resample_channels;
int packetlen;
int resampler_padding_samples;
float *resampler_padding;
void *resampler_state;
SDL_ResampleAudioStreamFunc resampler_func;
SDL_ResetAudioStreamResamplerFunc reset_resampler_func;
SDL_CleanupAudioStreamResamplerFunc cleanup_resampler_func;
};
This struct stores the matching source and destination parameters, and the stored CVT converters make it easy to convert between them.
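SDL_AudioStream is also exposed as a public API (SDL 2.0.7+). A minimal sketch converting mono S16 at 22050 Hz into stereo F32 at 48000 Hz:
SDL_AudioStream *stream = SDL_NewAudioStream(
        AUDIO_S16, 1, 22050,    /* src: format, channels, rate */
        AUDIO_F32, 2, 48000);   /* dst: format, channels, rate */
if (stream) {
    Sint16 in[1024] = { 0 };                     /* source samples   */
    SDL_AudioStreamPut(stream, in, sizeof(in));  /* feed source data */
    while (SDL_AudioStreamAvailable(stream) > 0) {
        float out[2048];
        int got = SDL_AudioStreamGet(stream, out, sizeof(out));
        if (got <= 0)
            break;    /* `got` bytes of converted audio are now in out */
    }
    SDL_FreeAudioStream(stream);
}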
- Setting the callback
If we do not set a callback (the audio-data callback), SDL installs a default one that manages a data queue.
Since we usually do not play raw PCM straight through, we normally set our own callback and do the format conversion and data filling there.
if (device->spec.callback == NULL) { /* use buffer queueing? */
/* pool a few packets to start. Enough for two callbacks. */
device->buffer_queue = SDL_NewDataQueue(SDL_AUDIOBUFFERQUEUE_PACKETLEN, obtained->size * 2);
if (!device->buffer_queue) {
close_audio_device(device);
SDL_SetError("Couldn't create audio buffer queue");
return 0;
}
device->callbackspec.callback = iscapture ? SDL_BufferQueueFillCallback : SDL_BufferQueueDrainCallback;
device->callbackspec.userdata = device;
}
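With no callback set, the application pushes data itself through the queueing API; a sketch (device id 1 refers to the device opened by the legacy SDL_OpenAudio()):
Uint8 pcm[4096] = { 0 };             /* decoded PCM in the obtained format */
if (SDL_QueueAudio(1, pcm, sizeof(pcm)) != 0) {
    SDL_Log("SDL_QueueAudio failed: %s", SDL_GetError());
}
SDL_PauseAudio(0);                   /* the drain callback feeds the device */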
- Finally, after some more configuration, the SDL_RunAudio thread is started (this is the playback path; capture runs a separate thread, SDL_CaptureAudio):
device->thread = SDL_CreateThreadInternal(iscapture ? SDL_CaptureAudio : SDL_RunAudio, threadname, stacksize, device);
The audio thread: SDL_RunAudio
First it raises the thread priority:
SDL_SetThreadPriority(SDL_THREAD_PRIORITY_TIME_CRITICAL)
The audio thread must run at high priority. It then checks whether the device has been shut down; if so it exits the loop, otherwise it keeps looping:
SDL_AtomicGet(&device->shutdown)
Note that the playback flags shutdown, paused, and enabled are all atomic variables, which keeps them consistent across threads.
- Determining the data buffer:
if (!device->stream && SDL_AtomicGet(&device->enabled)) {
SDL_assert(data_len == device->spec.size);
data = current_audio.impl.GetDeviceBuf(device);
} else {
/* if the device isn't enabled, we still write to the
work_buffer, so the app's callback will fire with
a regular frequency, in case they depend on that
for timing or progress. They can use hotplug
now to know if the device failed.
Streaming playback uses work_buffer, too. */
data = NULL;
}
if (data == NULL) {
data = device->work_buffer;
}
If there is no conversion stream and the device is enabled, the thread fetches the device's own buffer; that buffer is initialized when the device is opened and holds samples * channels entries.
Otherwise it uses work_buffer, which is sized from the spec we passed in at initialization.
- Deciding whether to invoke the callback:
SDL_LockMutex(device->mixer_lock);
if (SDL_AtomicGet(&device->paused)) {
SDL_memset(data, device->spec.silence, data_len);
} else {
callback(udata, data, data_len);
}
SDL_UnlockMutex(device->mixer_lock);
If playback is paused, the buffer is simply filled with silence and the iteration ends there.
If not paused, the callback runs (inside the callback we read, decode, and write the audio data).
- Playback:
if (device->stream) {
/* Stream available audio to device, converting/resampling. */
/* if this fails...oh well. We'll play silence here. */
SDL_AudioStreamPut(device->stream, data, data_len);
while (SDL_AudioStreamAvailable(device->stream) >= ((int) device->spec.size)) {
int got;
data = SDL_AtomicGet(&device->enabled) ? current_audio.impl.GetDeviceBuf(device) : NULL;
got = SDL_AudioStreamGet(device->stream, data ? data : device->work_buffer, device->spec.size);
SDL_assert((got < 0) || (got == device->spec.size));
if (data == NULL) { /* device is having issues... */
const Uint32 delay = ((device->spec.samples * 1000) / device->spec.freq);
SDL_Delay(delay); /* wait for as long as this buffer would have played. Maybe device recovers later? */
} else {
if (got != device->spec.size) {
SDL_memset(data, device->spec.silence, device->spec.size);
}
current_audio.impl.PlayDevice(device);
current_audio.impl.WaitDevice(device);
}
}
} else if (data == device->work_buffer) {
/* nothing to do; pause like we queued a buffer to play. */
const Uint32 delay = ((device->spec.samples * 1000) / device->spec.freq);
SDL_Delay(delay);
} else { /* writing directly to the device. */
/* queue this buffer and wait for it to finish playing. */
current_audio.impl.PlayDevice(device);
current_audio.impl.WaitDevice(device);
}
Finally the data is played. If conversion is needed, it happens first and the result is played; if the conversion fails, silence is played instead.
Playback goes through current_audio.impl.PlayDevice(device), which on Android is implemented by Android_JNI_WriteAudioBuffer in SDL_android.c.
Because this runs on a worker thread, it must first call Android_JNI_GetEnv, which invokes
int status = (*mJavaVM)->AttachCurrentThread(mJavaVM, &env, NULL);
to attach the current thread to the JVM; only then can JNI calls be made.
In the end the matching audioWriteXXXBuffer method in SDLAudioManager is called, and the AudioTrack write() is what actually plays the data.
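A condensed sketch of Android_JNI_WriteAudioBuffer, showing only the 16-bit path (audioBuffer and audioBufferPinned are SDL's globals for the pinned Java array; names follow the SDL2 source of this era, so treat the details as approximate):
void Android_JNI_WriteAudioBuffer(void)
{
    JNIEnv *env = Android_JNI_GetEnv();  /* attaches this thread if necessary */
    /* commit the pinned native buffer back into the Java array */
    (*env)->ReleaseShortArrayElements(env, (jshortArray)audioBuffer,
                                      (jshort *)audioBufferPinned, JNI_COMMIT);
    /* call SDLAudioManager.audioWriteShortBuffer(short[]) via the cached ID */
    (*env)->CallStaticVoidMethod(env, mAudioManagerClass,
                                 midAudioWriteShortBuffer, (jshortArray)audioBuffer);
}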
Starting or pausing playback: SDL_PauseAudio
void
SDL_PauseAudioDevice(SDL_AudioDeviceID devid, int pause_on)
{
SDL_AudioDevice *device = get_audio_device(devid);
if (device) {
current_audio.impl.LockDevice(device);
SDL_AtomicSet(&device->paused, pause_on ? 1 : 0);
current_audio.impl.UnlockDevice(device);
}
}
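Usage is a one-liner; a sketch showing both the legacy call (which controls the device opened by SDL_OpenAudio(), id 1) and the device-id variant, assuming a wanted/obtained spec pair as in the earlier sketch:
SDL_PauseAudio(0);    /* unpause: the callback starts firing */
SDL_PauseAudio(1);    /* pause: silence is written instead   */

/* the variant for handles from SDL_OpenAudioDevice():       */
SDL_AudioDeviceID dev = SDL_OpenAudioDevice(NULL, 0, &wanted, &obtained, 0);
SDL_PauseAudioDevice(dev, 0);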
From the analysis of the RunAudio thread above, we know this simply flips the device->paused flag, which controls whether the callback gets invoked.
callback
Let's look at how we hook in our own callback.
- Passing our callback
// Pass our own callback via the desired spec's callback field
wanted_spec.callback = audio_callback;
if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
ALOGE("SDL_OpenAudio: %s \n", SDL_GetError());
return -1;
}
- Defining the callback
void audio_callback(void *userdata, Uint8 *stream, int len) {
AVCodecContext *aCodecCtx = (AVCodecContext *) userdata;
int len1, audio_size;
static uint8_t audio_buf[(MAX_AUDIO_FRAME_SIZE * 3) / 2];
static unsigned int audio_buf_size = 0;
static unsigned int audio_buf_index = 0;
// Zero out the buffer we were handed
SDL_memset(stream, 0, len);
ALOGI("audio_callback len=%d \n", len);
// Send len bytes of data to the device
while (len > 0) {
// No data left in our local buffer
if (audio_buf_index >= audio_buf_size) {
// Decode more data from the packet
audio_size = audio_decode_frame(aCodecCtx, audio_buf, audio_buf_size);
//ALOGI("audio_decode_frame finish audio_size=%d \n", audio_size);
if (audio_size < 0) // decode failed or produced no data: fill with zeros
{
audio_buf_size = 1024;
memset(audio_buf, 0, audio_buf_size);
} else {
audio_buf_size = audio_size;
}
audio_buf_index = 0;
}
len1 = audio_buf_size - audio_buf_index;
if (len1 > len)
len1 = len;
// This simply copies the data straight through
memcpy(stream, (uint8_t *)audio_buf + audio_buf_index, len1);
// SDL_MixAudio lets us control the volume; a bare memcpy cannot
// SDL_MixAudio(stream, audio_buf + audio_buf_index, len1, SDL_MIX_MAXVOLUME);
//SDL_MixAudioFormat()
len -= len1;
stream += len1;
audio_buf_index += len1;
}
}
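As the commented-out line hints, a volume-controlled copy mixes into the (already zeroed) stream instead of using memcpy; a sketch assuming the S16 device format negotiated above:
SDL_MixAudioFormat(stream,                      /* destination (pre-zeroed) */
                   audio_buf + audio_buf_index, /* source PCM               */
                   AUDIO_S16SYS,                /* format of both buffers   */
                   len1,                        /* number of bytes to mix   */
                   SDL_MIX_MAXVOLUME / 2);      /* half volume              */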
Closing
- audioClose
/** This method is called by SDL using JNI. */
public static void audioClose() {
if (mAudioTrack != null) {
mAudioTrack.stop();
mAudioTrack.release();
mAudioTrack = null;
}
}
Closing the audio device ultimately just means calling stop and release on the AudioTrack; on the SDL side this is triggered by SDL_CloseAudio(), which reaches audioClose() through current_audio.impl.CloseDevice.