Background:
Anyone who has studied SurfaceFlinger or read articles about it knows that vsync is actually simulated in software by SurfaceFlinger. Software simulation, however, can drift or accumulate error, and that is when a hardware vsync is needed to recalibrate it.
That is why the calibration waveform below shows up in SurfaceFlinger's systrace. You can see that the HW vsync pulses only appear after hardware vsync has been enabled, and there are exactly six paired up/down pulses, i.e. the six periods we usually hear about; once these six hardware vsyncs have been used for calibration, the software vsync is back in step.
But... it all sounds plausible, yet how exactly are these hardware vsync waveforms produced? Why is hardware vsync said to come from hardware, and how does it actually come from hardware? It all still feels a bit vague, doesn't it? So today let's thoroughly demystify how hardware vsync really works. Note that a lot of kernel-driver code is involved below (students of the paid course get the full matching sources).
For more hands-on framework courses, add WeChat: androidframework007
The hardware vsync call trace on the SurfaceFlinger side:
This is fairly easy to spot, as shown in the figure below:
It is obvious that this is actually the HAL process calling back into the surfaceflinger process across the process boundary, so let's look at the HAL side.
The hardware vsync callback stack on the HAL side
First let's look at the trace: clicking the arrow on the surfaceflinger slice jumps straight to the other end.
That lands us in the graphics HAL process, whose trace looks like this:
You can see that the call is indeed made on the SDM_EventThread thread on the HAL side. Following the code all the way down leads to the stack below:
11-04 23:21:53.200 977 1065 D Vsync : #00 pc 000000000003b65c /vendor/lib64/hw/hwcomposer.msm8998.so (sdm::HWCCallbacks::Vsync(unsigned long, long)+76)
11-04 23:21:53.200 977 1065 D Vsync : #01 pc 000000000002f9c8 /vendor/lib64/hw/hwcomposer.msm8998.so (sdm::HWCDisplay::VSync(sdm::DisplayEventVSync const&)+28)
11-04 23:21:53.200 977 1065 D Vsync : #02 pc 000000000002c818 /vendor/lib64/libsdmcore.so (non-virtual thunk to sdm::DisplayPrimary::VSync(long)+68)
11-04 23:21:53.200 977 1065 D Vsync : #03 pc 0000000000048dc8 /vendor/lib64/libsdmcore.so (sdm::HWEvents::DisplayEventHandler()+288)
11-04 23:21:53.200 977 1065 D Vsync : #04 pc 0000000000048b60 /vendor/lib64/libsdmcore.so (sdm::HWEvents::DisplayEventThread(void*)+16)
11-04 23:21:53.200 977 1065 D Vsync : #05 pc 00000000000b63b0 /apex/com.android.runtime/lib64/bionic/libc.so (__pthread_start(void*)+208)
11-04 23:21:53.200 977 1065 D Vsync : #06 pc 00000000000530b8 /apex/com.android.runtime/lib64/bionic/libc.so (__start_thread+64)
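The top frame here, sdm::HWCCallbacks::Vsync, is essentially just a dispatcher: it forwards the event to whatever vsync callback SurfaceFlinger registered through the composer HAL, and that registered callback is what eventually reaches surfaceflinger over binder. A rough sketch of that shape (names approximated, not the verbatim vendor code):
/* Rough sketch of what the top frame boils down to: forward the event to the
 * vsync callback registered by the composer HAL client (SurfaceFlinger). */
#include <stdint.h>
#include <stddef.h>

typedef void (*vsync_cb_t)(void *callback_data, uint64_t display, int64_t timestamp);

static vsync_cb_t registered_vsync_cb;   /* filled in when the callback is registered */
static void *registered_cb_data;

static void hwc_callbacks_vsync(uint64_t display, int64_t timestamp)
{
    if (registered_vsync_cb)             /* this call eventually crosses into surfaceflinger */
        registered_vsync_cb(registered_cb_data, display, timestamp);
}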
Now let's start from the very bottom and figure out who actually triggers this HAL event. Does the HAL talk to the hardware directly? Think about it — that can't be the case: the HAL process is still just a userspace program and cannot drive the hardware by itself. Let the code reveal the answer:
// Initialize the pollfd for each event
pollfd HWEvents::InitializePollFd(HWEventData *event_data) {
  char node_path[kMaxStringLength] = {0};
  char data[kMaxStringLength] = {0};
  pollfd poll_fd = {0};
  poll_fd.fd = -1;

  if (event_data->event_type == HWEvent::EXIT) {
    // Create an eventfd to be used to unblock the poll system call when
    // a thread is exiting.
    poll_fd.fd = Sys::eventfd_(0, 0);
    poll_fd.events |= POLLIN;
    exit_fd_ = poll_fd.fd;
  } else {
    snprintf(node_path, sizeof(node_path), "%s%d/%s", fb_path_, fb_num_,
             map_event_to_node_[event_data->event_type]);
    poll_fd.fd = Sys::open_(node_path, O_RDONLY);
    poll_fd.events |= POLLPRI | POLLERR;
  }

  if (poll_fd.fd < 0) {
    DLOGW("open failed for display=%d event=%s, error=%s", fb_num_,
          map_event_to_node_[event_data->event_type], strerror(errno));
    return poll_fd;
  }

  // Read once on all fds to clear data on all fds.
  Sys::pread_(poll_fd.fd, data, kMaxStringLength, 0);
  return poll_fd;
}
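A small note on the node path assembled above: for the VSYNC event on this msm platform it typically resolves to a sysfs file along the lines of /sys/devices/virtual/graphics/fb0/vsync_event (assuming fb_path_ points at the fbdev sysfs directory and the VSYNC entry of map_event_to_node_ is "vsync_event"). That file is the fd the poll loop below blocks on, and it is the same node the kernel code later in this article notifies.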
// Set the parser for each event_type
DisplayError HWEvents::SetEventParser(HWEvent event_type, HWEventData *event_data) {
  DisplayError error = kErrorNone;
  switch (event_type) {
    case HWEvent::VSYNC:
      event_data->event_parser = &HWEvents::HandleVSync;
      break;
    case HWEvent::IDLE_NOTIFY:
      event_data->event_parser = &HWEvents::HandleIdleTimeout;
      break;
    case HWEvent::EXIT:
      event_data->event_parser = &HWEvents::HandleThreadExit;
      break;
    case HWEvent::SHOW_BLANK_EVENT:
      event_data->event_parser = &HWEvents::HandleBlank;
      break;
    case HWEvent::THERMAL_LEVEL:
      event_data->event_parser = &HWEvents::HandleThermal;
      break;
    case HWEvent::IDLE_POWER_COLLAPSE:
      event_data->event_parser = &HWEvents::HandleIdlePowerCollapse;
      break;
    default:
      error = kErrorParameters;
      break;
  }
  return error;
}
void HWEvents::PopulateHWEventData() {
  for (uint32_t i = 0; i < event_list_.size(); i++) {
    HWEventData event_data;
    event_data.event_type = event_list_[i];
    SetEventParser(event_list_[i], &event_data);
    poll_fds_[i] = InitializePollFd(&event_data);
    event_data_list_.push_back(event_data);
  }
}
DisplayError HWEvents::Init(int fb_num, HWEventHandler *event_handler,
                            const vector<HWEvent> &event_list) {
  // Create the thread that runs the poll loop
  if (pthread_create(&event_thread_, NULL, &DisplayEventThread, this) < 0) {
    DLOGE("Failed to start %s, error = %s", event_thread_name_.c_str(), strerror(errno));
    return kErrorResources;
  }
  return kErrorNone;
}
void* HWEvents::DisplayEventThread(void *context) {
  if (context) {
    return reinterpret_cast<HWEvents *>(context)->DisplayEventHandler();
  }
  return NULL;
}
// The actual thread body
void* HWEvents::DisplayEventHandler() {
  char data[kMaxStringLength] = {0};
  prctl(PR_SET_NAME, event_thread_name_.c_str(), 0, 0, 0);
  setpriority(PRIO_PROCESS, 0, kThreadPriorityUrgent);

  // Keep poll()ing the fds in a loop
  while (!exit_threads_) {
    int error = Sys::poll_(poll_fds_.data(), UINT32(event_list_.size()), -1);
    if (error <= 0) {
      DLOGW("poll failed. error = %s", strerror(errno));
      continue;
    }

    // poll() returning means some fd has data; figure out which event it is
    // and invoke the parser registered for it
    for (uint32_t event = 0; event < event_list_.size(); event++) {
      pollfd &poll_fd = poll_fds_[event];
      if (event_list_.at(event) == HWEvent::EXIT) {
        if ((poll_fd.revents & POLLIN) && (Sys::read_(poll_fd.fd, data, kMaxStringLength) > 0)) {
          (this->*(event_data_list_[event]).event_parser)(data);
        }
      } else {
        if ((poll_fd.revents & POLLPRI) &&
            (Sys::pread_(poll_fd.fd, data, kMaxStringLength, 0) > 0)) {
          (this->*(event_data_list_[event]).event_parser)(data);
        }
      }
    }
  }

  pthread_exit(0);
  return NULL;
}
// The VSYNC event ends up being dispatched to this method
void HWEvents::HandleVSync(char *data) {
  int64_t timestamp = 0;
  if (!strncmp(data, "VSYNC=", strlen("VSYNC="))) {
    timestamp = strtoll(data + strlen("VSYNC="), NULL, 0);
  }
  event_handler_->VSync(timestamp);
}
}  // namespace sdm
The code above is commented. Did you notice the familiar poll()? If you have been through the cross-process IPC topics, this logic takes only a couple of minutes to read: the core idea is to watch the vsync-related fd, read it when its data changes, and if it turns out to be a vsync event, fire the corresponding vsync callback. That is the HAL vsync callback.
To sum up, the HAL vsync callback is really just listening on an fd, nothing magical. You might ask: couldn't surfaceflinger listen on that fd directly and grab the data itself? It certainly could, but every hardware vendor implements this differently and you cannot count on other vendors doing it the same way. That is the other role of the HAL: decoupling the AOSP system side from the vendor-specific parts.
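To make the mechanism concrete, here is a small self-contained userspace sketch of the same pattern (the node path is an assumption for this msm/fbdev platform, and error handling is minimal): open the vsync sysfs node, poll() it for POLLPRI, and pread() the "VSYNC=<ns>" string back from offset 0 each time it fires.
/* Minimal standalone sketch of the HAL-side pattern: poll a sysfs vsync node
 * for POLLPRI and re-read it from offset 0 every time it fires.
 * The node path below is an assumption for this msm/fbdev platform. */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
    const char *node = "/sys/devices/virtual/graphics/fb0/vsync_event";
    char data[64] = {0};
    struct pollfd pfd;
    int i;

    int fd = open(node, O_RDONLY);
    if (fd < 0) {
        perror("open");
        return 1;
    }

    /* Read once first to clear any stale data, just like InitializePollFd(). */
    pread(fd, data, sizeof(data) - 1, 0);

    pfd.fd = fd;
    pfd.events = POLLPRI | POLLERR;

    for (i = 0; i < 10; i++) {                  /* watch 10 vsync pulses */
        ssize_t n;

        if (poll(&pfd, 1, -1) <= 0)             /* blocks until the kernel notifies */
            continue;
        if (!(pfd.revents & POLLPRI))
            continue;
        n = pread(fd, data, sizeof(data) - 1, 0);
        if (n <= 0)
            continue;
        data[n] = '\0';
        if (!strncmp(data, "VSYNC=", strlen("VSYNC="))) {
            long long ts = strtoll(data + strlen("VSYNC="), NULL, 0);
            printf("hw vsync timestamp = %lld ns\n", ts);
        }
    }
    close(fd);
    return 0;
}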
But here comes the next question: who makes data appear on this fd in the first place?
The kernel notifies the fd:
上面hal監(jiān)聽的fd來自哪里?其實大家猜想肯定應該是內(nèi)核,因為畢竟是硬件vsync眼滤,所以可以觸碰硬件東西當然是我們的內(nèi)核驅動巴席。這里最后找到如下代碼:
drivers/video/fbdev/msm/mdss_mdp_overlay.c
/* Called from the interrupt path */
/* function is called in irq context should have minimum processing */
static void mdss_mdp_overlay_handle_vsync(struct mdss_mdp_ctl *ctl,
					  ktime_t t)
{
	struct msm_fb_data_type *mfd = NULL;
	struct mdss_overlay_private *mdp5_data = NULL;

	dump_stack();	/* added here to capture the call stack shown below */
	ATRACE_BEGIN("mdss_mdp_overlay_handle_vsync");

	if (!ctl) {
		pr_err("ctl is NULL\n");
		return;
	}

	mfd = ctl->mfd;
	if (!mfd || !mfd->mdp.private1) {
		pr_warn("Invalid handle for vsync\n");
		return;
	}

	mdp5_data = mfd_to_mdp5_data(mfd);
	if (!mdp5_data) {
		pr_err("mdp5_data is NULL\n");
		return;
	}

	pr_debug("vsync on fb%d play_cnt=%d\n", mfd->index, ctl->play_cnt);

	mdp5_data->vsync_time = t;
	sysfs_notify_dirent(mdp5_data->vsync_event_sd);	/* notify the sysfs node; this is what wakes up poll() in the HAL */
	ATRACE_END("mdss_mdp_overlay_handle_vsync");
}
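One detail worth connecting: what does the HAL actually read back after sysfs_notify_dirent() wakes it up? On this platform the sysfs node's show handler (roughly mdss_mdp_vsync_show_event() in the same driver) formats the timestamp stored above as a "VSYNC=<nanoseconds>" string, which is exactly the prefix that HWEvents::HandleVSync parses on the HAL side. A simplified sketch of that handler (details and types simplified, assuming the driver's own headers; not the verbatim kernel source):
/* Simplified sketch of the sysfs "show" handler that the HAL's pread() lands in. */
static ssize_t vsync_show_event_sketch(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct msm_fb_data_type *mfd = dev_get_drvdata(dev);		/* simplified */
	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
	u64 vsync_ticks = ktime_to_ns(mdp5_data->vsync_time);		/* set by the ISR path above */

	return scnprintf(buf, PAGE_SIZE, "VSYNC=%llu\n", vsync_ticks);
}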
In the driver, vsync is triggered by the corresponding hardware interrupt; the call stack (printed by the dump_stack() above) looks like this:
11-05 00:24:24.470 0 0 I Call trace:
11-05 00:24:24.470 0 0 I : [<ffffff91f4c8a874>] dump_backtrace+0x0/0x3a8
11-05 00:24:24.470 0 0 I : [<ffffff91f4c8a86c>] show_stack+0x14/0x1c
11-05 00:24:24.470 0 0 I : [<ffffff91f50280d0>] dump_stack+0xe4/0x11c
11-05 00:24:24.470 0 0 I : [<ffffff91f51037b4>] mdss_mdp_overlay_handle_vsync+0x20/0x1dc
11-05 00:24:24.470 0 0 I : [<ffffff91f50f26d0>] mdss_mdp_cmd_readptr_done+0x154/0x33c
11-05 00:24:24.470 0 0 I : [<ffffff91f50b9534>] mdss_mdp_isr+0x140/0x3bc
11-05 00:24:24.470 0 0 I : [<ffffff91f5164d94>] mdss_irq_dispatch+0x50/0x68
11-05 00:24:24.470 0 0 I : [<ffffff91f50c15cc>] mdss_irq_handler+0x84/0x1c0
11-05 00:24:24.470 0 0 I : [<ffffff91f4d1a92c>] handle_irq_event_percpu+0x78/0x28c
11-05 00:24:24.470 0 0 I : [<ffffff91f4d1abc8>] handle_irq_event+0x44/0x74
11-05 00:24:24.470 0 0 I : [<ffffff91f4d1e714>] handle_fasteoi_irq+0xd8/0x1b0
11-05 00:24:24.470 0 0 I : [<ffffff91f4d1a118>] __handle_domain_irq+0x7c/0xbc
11-05 00:24:24.470 0 0 I : [<ffffff91f4c811d0>] gic_handle_irq+0x80/0x144
With trace points added on the kernel side as well, the combined view looks like this:
Reading it together with the trace makes things much clearer; the whole call flow is:
The hardware interrupt fires and the kernel ISR path runs, notifying the corresponding fd
---> the HAL process, which is polling that fd, reads and parses the data and delivers the vsync callback across the process boundary
---> surfaceflinger receives the cross-process call from the HAL, updates its internal parameters, and re-calibrates its software vsync
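As a closing illustration, here is a purely schematic sketch (not SurfaceFlinger's real DispSync code) of what that last re-calibration step amounts to: take a handful of hardware vsync timestamps, re-estimate the period from their deltas, re-anchor the phase on the last sample, and predict software vsyncs from the new model.
/* Purely schematic sketch of software-vsync re-calibration from hw vsync samples. */
#include <stdint.h>

#define HW_SAMPLES 6                    /* e.g. the six hw pulses seen in systrace */

static int64_t sw_period_ns = 16666667; /* ~60 Hz until calibrated */
static int64_t sw_phase_ns;

static void calibrate(const int64_t ts[HW_SAMPLES])
{
    int64_t sum = 0;
    int i;

    for (i = 1; i < HW_SAMPLES; i++)
        sum += ts[i] - ts[i - 1];
    sw_period_ns = sum / (HW_SAMPLES - 1);  /* averaged hardware period */
    sw_phase_ns  = ts[HW_SAMPLES - 1];      /* re-anchor on the last hw vsync */
}

/* Predict the next software vsync strictly after now_ns, from the fitted model. */
static int64_t next_sw_vsync(int64_t now_ns)
{
    int64_t n = (now_ns - sw_phase_ns) / sw_period_ns + 1;

    return sw_phase_ns + n * sw_period_ns;
}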