In the previous chapters we went through UVCCamera's initialization and the preparation work for preview. In this chapter we walk through the whole startPreview flow. As usual, let's start with a rough sequence diagram of the calls:
Picking up where we left off, starting the preview eventually reaches the handleStartPreview method of AbstractUVCCameraHandler.CameraThread, which in turn calls UVCCamera's startPreview. As the diagram above shows, UVCCamera.startPreview ultimately ends up in the native UVCPreview::startPreview method.
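The JNI glue between the Java UVCCamera object and the native code is not the focus of this article, and the snippet below is only a hypothetical, simplified sketch of that bridge (the package, method and parameter names are made up for illustration; they are not the library's actual ones). The common pattern is that the Java layer keeps the address of the native object in a long field and the native method casts it back:

#include <jni.h>
#include "UVCPreview.h"   // the class discussed throughout this article

// Hypothetical bridge: id_preview is the native object's address stored on the Java side.
extern "C" JNIEXPORT jint JNICALL
Java_com_example_UVCCamera_nativeStartPreview(JNIEnv *env, jobject thiz, jlong id_preview) {
    UVCPreview *preview = reinterpret_cast<UVCPreview *>(id_preview);
    if (!preview) return JNI_ERR;
    return preview->startPreview();   // lands in the C++ method shown below
}

With that in mind, here is UVCPreview::startPreview: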
int UVCPreview::startPreview() {
    ENTER();
    int result = EXIT_FAILURE;
    if (!isRunning()) {
        mIsRunning = true;
        pthread_mutex_lock(&preview_mutex);
        {
            if (LIKELY(mPreviewWindow)) {
                result = pthread_create(&preview_thread, NULL, preview_thread_func, (void *)this);
            }
        }
        pthread_mutex_unlock(&preview_mutex);
        if (UNLIKELY(result != EXIT_SUCCESS)) {
            LOGW("UVCCamera::window does not exist/already running/could not create thread etc.");
            mIsRunning = false;
            pthread_mutex_lock(&preview_mutex);
            {
                pthread_cond_signal(&preview_sync);
            }
            pthread_mutex_unlock(&preview_mutex);
        }
    }
    RETURN(result, int);
}
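Before following that thread, it is worth calling out the pattern used here: a static entry function receives this through the void* argument and casts it back, while preview_mutex and preview_sync synchronize with the rest of the object. A minimal standalone sketch of the same pattern (the Worker class below is made up for illustration):

#include <pthread.h>

class Worker {
public:
    int start() {
        // pthread_create returns 0 on success and an error number otherwise;
        // EXIT_SUCCESS is 0 on Android, which is why startPreview can compare against it.
        return pthread_create(&thread_, NULL, entry, this);
    }
    void join() { pthread_join(thread_, NULL); }

private:
    // static entry point: recover the object from the opaque argument
    static void *entry(void *arg) {
        static_cast<Worker *>(arg)->run();
        return NULL;
    }
    void run() { /* the thread's actual work, e.g. a preview loop */ }

    pthread_t thread_;
};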
startPreview spins up a thread via pthread_create, and the work that thread performs lives in preview_thread_func, so let's look at preview_thread_func next:
void *UVCPreview::preview_thread_func(void *vptr_args) {
    int result;
    ENTER();
    UVCPreview *preview = reinterpret_cast<UVCPreview *>(vptr_args);
    if (LIKELY(preview)) {
        uvc_stream_ctrl_t ctrl;
        result = preview->prepare_preview(&ctrl);
        if (LIKELY(!result)) {
            preview->do_preview(&ctrl);
        }
    }
    PRE_EXIT();
    pthread_exit(NULL);
}
Here we only care about two methods, prepare_preview and do_preview. First, prepare_preview:
int UVCPreview::prepare_preview(uvc_stream_ctrl_t *ctrl) {
    uvc_error_t result;
    ENTER();
    result = uvc_get_stream_ctrl_format_size_fps(mDeviceHandle, ctrl,
        !requestMode ? UVC_FRAME_FORMAT_YUYV : UVC_FRAME_FORMAT_MJPEG,
        requestWidth, requestHeight, requestMinFps, requestMaxFps
    );
    if (LIKELY(!result)) {
#if LOCAL_DEBUG
        uvc_print_stream_ctrl(ctrl, stderr);
#endif
        uvc_frame_desc_t *frame_desc;
        result = uvc_get_frame_desc(mDeviceHandle, ctrl, &frame_desc);
        if (LIKELY(!result)) {
            frameWidth = frame_desc->wWidth;
            frameHeight = frame_desc->wHeight;
            LOGI("frameSize=(%d,%d)@%s", frameWidth, frameHeight, (!requestMode ? "YUYV" : "MJPEG"));
            pthread_mutex_lock(&preview_mutex);
            if (LIKELY(mPreviewWindow)) {
                ANativeWindow_setBuffersGeometry(mPreviewWindow,
                    frameWidth, frameHeight, previewFormat);
            }
            pthread_mutex_unlock(&preview_mutex);
        } else {
            frameWidth = requestWidth;
            frameHeight = requestHeight;
        }
        frameMode = requestMode;
        frameBytes = frameWidth * frameHeight * (!requestMode ? 2 : 4);
        previewBytes = frameWidth * frameHeight * PREVIEW_PIXEL_BYTES;
    } else {
        LOGE("could not negotiate with camera:err=%d", result);
    }
    RETURN(result, int);
}
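Two details in prepare_preview are worth spelling out: the byte counts follow directly from the pixel formats (YUYV is 2 bytes per pixel, the MJPEG path is budgeted at 4, and the RGBX preview buffer is always 4), and ANativeWindow_setBuffersGeometry is the standard NDK call for resizing a window's buffers. A minimal sketch of the same sizing logic, assuming an RGBX_8888 window format in line with the uvc_any2rgbx conversion used later (the helper below is illustrative, not part of the library):

#include <android/native_window.h>
#include <cstddef>

struct PreviewSizes {
    size_t frameBytes;     // raw frame budget: YUYV = 2 bytes/pixel, MJPEG path budgets 4
    size_t previewBytes;   // RGBX buffer handed to the Surface: 4 bytes/pixel
};

// Mirrors the sizing logic of prepare_preview for a negotiated width/height.
static PreviewSizes configure_preview(ANativeWindow *window, int width, int height, bool mjpeg) {
    PreviewSizes s;
    s.frameBytes   = (size_t)width * height * (mjpeg ? 4 : 2);
    s.previewBytes = (size_t)width * height * 4;
    if (window) {
        // resize the window's buffers to the negotiated frame size (RGBX layout assumed)
        ANativeWindow_setBuffersGeometry(window, width, height, WINDOW_FORMAT_RGBX_8888);
    }
    return s;
}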
prepare_preview mainly sets up the preview parameters: the negotiated frame width and height, and the buffer sizes required for the chosen color mode. It also calls ANativeWindow_setBuffersGeometry to update the native window's geometry to match. Next, let's look at do_preview:
void UVCPreview::do_preview(uvc_stream_ctrl_t *ctrl) {
    ENTER();
    uvc_frame_t *frame = NULL;
    uvc_frame_t *frame_mjpeg = NULL;
    uvc_error_t result = uvc_start_streaming_bandwidth(
        mDeviceHandle, ctrl, uvc_preview_frame_callback, (void *)this, requestBandwidth, 0);
    if (LIKELY(!result)) {
        clearPreviewFrame();
        pthread_create(&capture_thread, NULL, capture_thread_func, (void *)this);
#if LOCAL_DEBUG
        LOGI("Streaming...");
#endif
        if (frameMode) {
            // MJPEG mode
            for ( ; LIKELY(isRunning()) ; ) {
                frame_mjpeg = waitPreviewFrame();
                if (LIKELY(frame_mjpeg)) {
                    frame = get_frame(frame_mjpeg->width * frame_mjpeg->height * 2);
                    result = uvc_mjpeg2yuyv(frame_mjpeg, frame); // MJPEG => yuyv
                    recycle_frame(frame_mjpeg);
                    if (LIKELY(!result)) {
                        frame = draw_preview_one(frame, &mPreviewWindow, uvc_any2rgbx, 4);
                        addCaptureFrame(frame);
                    } else {
                        recycle_frame(frame);
                    }
                }
            }
        } else {
            // yuvyv mode
            for ( ; LIKELY(isRunning()) ; ) {
                frame = waitPreviewFrame();
                if (LIKELY(frame)) {
                    frame = draw_preview_one(frame, &mPreviewWindow, uvc_any2rgbx, 4);
                    addCaptureFrame(frame);
                }
            }
        }
        pthread_cond_signal(&capture_sync);
#if LOCAL_DEBUG
        LOGI("preview_thread_func:wait for all callbacks complete");
#endif
        uvc_stop_streaming(mDeviceHandle);
#if LOCAL_DEBUG
        LOGI("Streaming finished");
#endif
    } else {
        uvc_perror(result, "failed start_streaming");
    }
    EXIT();
}
Inside do_preview we see a call to uvc_start_streaming_bandwidth, which is defined in libuvc's stream.c:
/** Begin streaming video from the camera into the callback function.
 * @ingroup streaming
 *
 * @param devh UVC device
 * @param ctrl Control block, processed using {uvc_probe_stream_ctrl} or
 *        {uvc_get_stream_ctrl_format_size}
 * @param cb User callback function. See {uvc_frame_callback_t} for restrictions.
 * @param bandwidth_factor [0.0f, 1.0f]
 * @param flags Stream setup flags, currently undefined. Set this to zero. The lower bit
 *        is reserved for backward compatibility.
 */
uvc_error_t uvc_start_streaming_bandwidth(uvc_device_handle_t *devh,
        uvc_stream_ctrl_t *ctrl, uvc_frame_callback_t *cb, void *user_ptr,
        float bandwidth_factor,
        uint8_t flags) {
    uvc_error_t ret;
    uvc_stream_handle_t *strmh;
    ret = uvc_stream_open_ctrl(devh, &strmh, ctrl);
    if (UNLIKELY(ret != UVC_SUCCESS))
        return ret;
    ret = uvc_stream_start_bandwidth(strmh, cb, user_ptr, bandwidth_factor, flags);
    if (UNLIKELY(ret != UVC_SUCCESS)) {
        uvc_stream_close(strmh);
        return ret;
    }
    return UVC_SUCCESS;
}
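uvc_start_streaming_bandwidth and uvc_get_stream_ctrl_format_size_fps are extensions in this fork of libuvc; upstream libuvc exposes the same flow through uvc_get_stream_ctrl_format_size and uvc_start_streaming. For reference, the canonical callback-driven capture sequence with plain libuvc looks roughly like this (error handling trimmed for brevity):

#include <libuvc/libuvc.h>

// Called on libuvc's streaming thread for every decoded frame.
static void on_frame(uvc_frame_t *frame, void *user_ptr) {
    (void)frame; (void)user_ptr;
}

int capture_demo(void) {
    uvc_context_t *ctx; uvc_device_t *dev; uvc_device_handle_t *devh;
    uvc_stream_ctrl_t ctrl;

    uvc_init(&ctx, NULL);
    uvc_find_device(ctx, &dev, 0, 0, NULL);               // first UVC device found
    uvc_open(dev, &devh);
    uvc_get_stream_ctrl_format_size(devh, &ctrl,          // negotiate format/size/fps
            UVC_FRAME_FORMAT_YUYV, 640, 480, 30);
    uvc_start_streaming(devh, &ctrl, on_frame, NULL, 0);  // frames start arriving in on_frame
    /* ... stream for a while ... */
    uvc_stop_streaming(devh);
    uvc_close(devh);
    uvc_unref_device(dev);
    uvc_exit(ctx);
    return 0;
}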
As the doc comment above explains, uvc_start_streaming_bandwidth starts feeding the frames captured by the camera into the user callback, so next we look at the callback that was passed in, uvc_preview_frame_callback:
void UVCPreview::uvc_preview_frame_callback(uvc_frame_t *frame, void *vptr_args) {
    UVCPreview *preview = reinterpret_cast<UVCPreview *>(vptr_args);
    if UNLIKELY(!preview->isRunning() || !frame || !frame->frame_format || !frame->data || !frame->data_bytes) return;
    if (UNLIKELY(
        ((frame->frame_format != UVC_FRAME_FORMAT_MJPEG) && (frame->actual_bytes < preview->frameBytes))
        || (frame->width != preview->frameWidth) || (frame->height != preview->frameHeight) )) {
#if LOCAL_DEBUG
        LOGD("broken frame!:format=%d,actual_bytes=%d/%d(%d,%d/%d,%d)",
            frame->frame_format, frame->actual_bytes, preview->frameBytes,
            frame->width, frame->height, preview->frameWidth, preview->frameHeight);
#endif
        return;
    }
    if (LIKELY(preview->isRunning())) {
        uvc_frame_t *copy = preview->get_frame(frame->data_bytes);
        if (UNLIKELY(!copy)) {
#if LOCAL_DEBUG
            LOGE("uvc_callback:unable to allocate duplicate frame!");
#endif
            return;
        }
        uvc_error_t ret = uvc_duplicate_frame(frame, copy);
        if (UNLIKELY(ret)) {
            preview->recycle_frame(copy);
            return;
        }
        preview->addPreviewFrame(copy);
    }
}
The validation at the top of this callback can be skipped for now; what we care about is how a frame gets handled. The interesting line is uvc_frame_t *copy = preview->get_frame(frame->data_bytes); so let's look at get_frame:
/**
 * get uvc_frame_t from frame pool
 * if pool is empty, create new frame
 * this function does not confirm the frame size
 * and you may need to confirm the size
 */
uvc_frame_t *UVCPreview::get_frame(size_t data_bytes) {
    uvc_frame_t *frame = NULL;
    pthread_mutex_lock(&pool_mutex);
    {
        if (!mFramePool.isEmpty()) {
            frame = mFramePool.last();
        }
    }
    pthread_mutex_unlock(&pool_mutex);
    if UNLIKELY(!frame) {
        LOGW("allocate new frame");
        frame = uvc_allocate_frame(data_bytes);
    }
    return frame;
}
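The other half of the pool, recycle_frame, is not quoted in this article. Judging from get_frame above, a plausible sketch is: put the frame back into mFramePool under pool_mutex, and free it with uvc_free_frame if the pool is already full (the pool-size constant and the exact ObjectArray calls below are assumptions; the real UVCPreview.cpp may differ in detail):

// Sketch only: return a frame to the pool, or free it when the pool is full.
void UVCPreview::recycle_frame(uvc_frame_t *frame) {
    if (!frame) return;
    pthread_mutex_lock(&pool_mutex);
    if (mFramePool.size() < FRAME_POOL_SZ) {   // FRAME_POOL_SZ: assumed pool capacity
        mFramePool.put(frame);                 // keep the buffer around for reuse
        frame = NULL;
    }
    pthread_mutex_unlock(&pool_mutex);
    if (frame) {
        uvc_free_frame(frame);                 // pool full: release the memory instead
    }
}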
get_frame first tries to take a frame from the global mFramePool (allocating a new one only if the pool is empty). Then uvc_duplicate_frame, from libuvc's frame.c, copies the frame data received from the camera into the *copy we just obtained from the pool:
/** @brief Duplicate a frame, preserving color format
 * @ingroup frame
 *
 * @param in Original frame
 * @param out Duplicate frame
 */
uvc_error_t uvc_duplicate_frame(uvc_frame_t *in, uvc_frame_t *out) {
    if (UNLIKELY(uvc_ensure_frame_size(out, in->data_bytes) < 0))
        return UVC_ERROR_NO_MEM;
    out->width = in->width;
    out->height = in->height;
    out->frame_format = in->frame_format;
    if (out->library_owns_data)
        out->step = in->step;
    out->sequence = in->sequence;
    out->capture_time = in->capture_time;
    out->source = in->source;
    out->actual_bytes = in->actual_bytes; // XXX
#if USE_STRIDE // XXX
    if (in->step && out->step) {
        const int istep = in->step;
        const int ostep = out->step;
        const int hh = in->height < out->height ? in->height : out->height;
        const int rowbytes = istep < ostep ? istep : ostep;
        register void *ip = in->data;
        register void *op = out->data;
        int h;
        for (h = 0; h < hh; h += 4) {
            memcpy(op, ip, rowbytes);
            ip += istep; op += ostep;
            memcpy(op, ip, rowbytes);
            ip += istep; op += ostep;
            memcpy(op, ip, rowbytes);
            ip += istep; op += ostep;
            memcpy(op, ip, rowbytes);
            ip += istep; op += ostep;
        }
    } else {
        // compressed format? XXX if only one of the frame in / out has step, this may lead to crash...
        memcpy(out->data, in->data, in->actual_bytes);
    }
#else
    memcpy(out->data, in->data, in->actual_bytes); // XXX
#endif
    return UVC_SUCCESS;
}
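uvc_allocate_frame, uvc_duplicate_frame and uvc_free_frame are standard libuvc frame helpers, so this copy step can also be exercised on its own. A small usage sketch:

#include <libuvc/libuvc.h>

// Duplicate a frame received in a callback so that it can outlive the callback.
uvc_frame_t *clone_frame(uvc_frame_t *in) {
    uvc_frame_t *out = uvc_allocate_frame(in->data_bytes);  // allocate the data buffer up front
    if (!out)
        return NULL;
    if (uvc_duplicate_frame(in, out) != UVC_SUCCESS) {       // copies metadata and pixel data
        uvc_free_frame(out);
        return NULL;
    }
    return out;                                              // caller releases it with uvc_free_frame()
}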
Finally, UVCPreview's addPreviewFrame puts the copied frame into the previewFrames queue:
void UVCPreview::addPreviewFrame(uvc_frame_t *frame) {
    pthread_mutex_lock(&preview_mutex);
    if (isRunning() && (previewFrames.size() < MAX_FRAME)) {
        previewFrames.put(frame);
        frame = NULL;
        pthread_cond_signal(&preview_sync);
    }
    pthread_mutex_unlock(&preview_mutex);
    if (frame) {
        recycle_frame(frame);
    }
}
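addPreviewFrame's consumer, waitPreviewFrame, is not quoted in the article either. Given the preview_sync signaling above, a plausible sketch (the real UVCPreview.cpp may differ slightly) is a classic condition-variable wait that pops the oldest queued frame:

// Sketch only: block until the UVC callback thread has queued at least one frame.
uvc_frame_t *UVCPreview::waitPreviewFrame() {
    uvc_frame_t *frame = NULL;
    pthread_mutex_lock(&preview_mutex);
    {
        if (!previewFrames.size()) {
            // atomically releases preview_mutex while waiting;
            // woken by pthread_cond_signal(&preview_sync) in addPreviewFrame
            pthread_cond_wait(&preview_sync, &preview_mutex);
        }
        if (isRunning() && previewFrames.size() > 0) {
            frame = previewFrames.remove(0);   // take the oldest frame (FIFO)
        }
    }
    pthread_mutex_unlock(&preview_mutex);
    return frame;
}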
So much for how frames get from the UVC callback into previewFrames. Now let's return to the rest of do_preview; its core is this block:
if (frameMode) {
    // MJPEG mode
    for ( ; LIKELY(isRunning()) ; ) {
        frame_mjpeg = waitPreviewFrame();
        if (LIKELY(frame_mjpeg)) {
            frame = get_frame(frame_mjpeg->width * frame_mjpeg->height * 2);
            result = uvc_mjpeg2yuyv(frame_mjpeg, frame); // MJPEG => yuyv
            recycle_frame(frame_mjpeg);
            if (LIKELY(!result)) {
                frame = draw_preview_one(frame, &mPreviewWindow, uvc_any2rgbx, 4);
                addCaptureFrame(frame);
            } else {
                recycle_frame(frame);
            }
        }
    }
} else {
    // yuvyv mode
    for ( ; LIKELY(isRunning()) ; ) {
        frame = waitPreviewFrame();
        if (LIKELY(frame)) {
            frame = draw_preview_one(frame, &mPreviewWindow, uvc_any2rgbx, 4);
            addCaptureFrame(frame);
        }
    }
}
In short, the frames are processed according to the configured mode. MJPEG mode only adds one step compared with YUYV: a conversion via uvc_mjpeg2yuyv in libuvc's frame-mjpeg.c, which we won't expand on here; interested readers can look at that function's source. Back in do_preview, both the MJPEG and the YUYV paths eventually call draw_preview_one, and as the name suggests, this is where the captured data gets drawn onto the native window:
// changed to return original frame instead of returning converted frame even if convert_func is not null.
uvc_frame_t *UVCPreview::draw_preview_one(uvc_frame_t *frame, ANativeWindow **window, convFunc_t convert_func, int pixcelBytes) {
    // ENTER();
    int b = 0;
    pthread_mutex_lock(&preview_mutex);
    {
        b = *window != NULL;
    }
    pthread_mutex_unlock(&preview_mutex);
    if (LIKELY(b)) {
        uvc_frame_t *converted;
        if (convert_func) {
            converted = get_frame(frame->width * frame->height * pixcelBytes);
            if LIKELY(converted) {
                b = convert_func(frame, converted);
                if (!b) {
                    pthread_mutex_lock(&preview_mutex);
                    copyToSurface(converted, window);
                    pthread_mutex_unlock(&preview_mutex);
                } else {
                    LOGE("failed converting");
                }
                recycle_frame(converted);
            }
        } else {
            pthread_mutex_lock(&preview_mutex);
            copyToSurface(frame, window);
            pthread_mutex_unlock(&preview_mutex);
        }
    }
    return frame; //RETURN(frame, uvc_frame_t *);
}
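convert_func here is uvc_any2rgbx, i.e. the YUYV frame is expanded to 4-byte RGBX before being handed to the Surface. That converter is not quoted in this article; as a rough illustration of what such a conversion does, one common BT.601-style YUYV-to-RGBX kernel looks like the sketch below (coefficients and rounding may well differ from the library's uvc_any2rgbx):

#include <stdint.h>

static inline uint8_t clamp_u8(int v) { return v < 0 ? 0 : (v > 255 ? 255 : (uint8_t)v); }

// Convert one YUYV macropixel (Y0 U Y1 V) into two RGBX pixels. Illustrative only.
static void yuyv_pair_to_rgbx(const uint8_t yuyv[4], uint8_t rgbx[8]) {
    const int u = yuyv[1] - 128, v = yuyv[3] - 128;
    const int r_off = (int)( 1.402f * v);
    const int g_off = (int)(-0.344f * u - 0.714f * v);
    const int b_off = (int)( 1.772f * u);
    for (int i = 0; i < 2; i++) {
        const int y = yuyv[i * 2];
        rgbx[i * 4 + 0] = clamp_u8(y + r_off);
        rgbx[i * 4 + 1] = clamp_u8(y + g_off);
        rgbx[i * 4 + 2] = clamp_u8(y + b_off);
        rgbx[i * 4 + 3] = 0xff;                // X / padding byte
    }
}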
As the source shows, the core of draw_preview_one is to render the prepared frame onto the ANativeWindow by calling copyToSurface:
// transfer specific frame data to the Surface(ANativeWindow)
int copyToSurface(uvc_frame_t *frame, ANativeWindow **window) {
    // ENTER();
    int result = 0;
    if (LIKELY(*window)) {
        ANativeWindow_Buffer buffer;
        if (LIKELY(ANativeWindow_lock(*window, &buffer, NULL) == 0)) {
            // source = frame data
            const uint8_t *src = (uint8_t *)frame->data;
            const int src_w = frame->width * PREVIEW_PIXEL_BYTES;
            const int src_step = frame->width * PREVIEW_PIXEL_BYTES;
            // destination = Surface(ANativeWindow)
            uint8_t *dest = (uint8_t *)buffer.bits;
            const int dest_w = buffer.width * PREVIEW_PIXEL_BYTES;
            const int dest_step = buffer.stride * PREVIEW_PIXEL_BYTES;
            // use lower transfer bytes
            const int w = src_w < dest_w ? src_w : dest_w;
            // use lower height
            const int h = frame->height < buffer.height ? frame->height : buffer.height;
            // transfer from frame data to the Surface
            copyFrame(src, dest, w, h, src_step, dest_step);
            ANativeWindow_unlockAndPost(*window);
        } else {
            result = -1;
        }
    } else {
        result = -1;
    }
    return result; //RETURN(result, int);
}
// row-by-row copy from the frame buffer to the window buffer, honoring the
// (possibly different) source and destination strides
static void copyFrame(const uint8_t *src, uint8_t *dest, const int width, int height, const int stride_src, const int stride_dest) {
    const int h8 = height % 8;
    // copy the leftover rows (height % 8) one at a time
    for (int i = 0; i < h8; i++) {
        memcpy(dest, src, width);
        dest += stride_dest; src += stride_src;
    }
    // copy the remaining rows in blocks of 8 (manually unrolled loop)
    for (int i = 0; i < height; i += 8) {
        memcpy(dest, src, width);
        dest += stride_dest; src += stride_src;
        memcpy(dest, src, width);
        dest += stride_dest; src += stride_src;
        memcpy(dest, src, width);
        dest += stride_dest; src += stride_src;
        memcpy(dest, src, width);
        dest += stride_dest; src += stride_src;
        memcpy(dest, src, width);
        dest += stride_dest; src += stride_src;
        memcpy(dest, src, width);
        dest += stride_dest; src += stride_src;
        memcpy(dest, src, width);
        dest += stride_dest; src += stride_src;
        memcpy(dest, src, width);
        dest += stride_dest; src += stride_src;
    }
}
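For concreteness, here is the stride arithmetic of copyToSurface with made-up numbers; the point is that the window buffer's rows may be padded (stride >= width), so source and destination advance by different step sizes while only the visible bytes of each row are copied:

#include <cstdio>

// Worked example (hypothetical numbers): a 640x480 RGBX frame copied into a window
// buffer whose rows are padded to a stride of 656 pixels.
int main() {
    const int PIXEL_BYTES = 4;
    const int frame_width = 640, buffer_width = 640, buffer_stride = 656;
    const int src_step  = frame_width   * PIXEL_BYTES;  // 2560 bytes from one source row to the next
    const int dest_step = buffer_stride * PIXEL_BYTES;  // 2624 bytes from one destination row to the next
    const int src_w  = frame_width  * PIXEL_BYTES;
    const int dest_w = buffer_width * PIXEL_BYTES;
    const int w = src_w < dest_w ? src_w : dest_w;       // only the visible 2560 bytes are copied per row
    printf("copy %d bytes per row, then skip %d bytes of padding\n", w, dest_step - w);
    return 0;
}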
Summary
That wraps up our walkthrough of UVCCamera's preview feature. The overall flow is fairly clear, even if my explanation may not be; I'll keep polishing this article over time, and I appreciate your patience.