Linux epoll Source Code Analysis
Linux kernel version: 2.6.34
Things you should be familiar with before reading the epoll source:
- Wait queues
- The file system layer (mainly a process's open file descriptor table and struct file)
- The poll mechanism (a minimal sketch of the underlying wait-queue pattern follows this list)
  - Registering interest in a resource:
    poll() -> poll_wait(struct file *, wait_queue_head_t *, poll_table *pt) -> pt->qproc(struct file *, wait_queue_head_t *, poll_table *)
  - Readiness notification:
    callback_function(wait_queue_t *, unsigned mode, int sync, void *key)
- epoll's main data structures
  - One epoll instance corresponds to one struct eventpoll (referenced from user space through an epoll fd)
  - One monitored event corresponds to one struct epitem (epoll_ctl() operates on epitems)
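Both halves of the poll mechanism above are built on the kernel wait-queue API: registration hooks a node with a custom wakeup function onto the resource's wait queue, and notification is just the resource calling wake_up() on that queue. Here is a minimal sketch of that pattern, assuming a 2.6-era kernel module; the names my_whead, my_wait and my_poll_callback are hypothetical:
// sketch: wait-queue registration and notification (hypothetical module code)
#include <linux/wait.h>
#include <linux/kernel.h>

static wait_queue_head_t my_whead;  /* the resource's wait queue */
static wait_queue_t my_wait;        /* our hook into that queue */

/* Invoked by the wakeup path when the resource signals readiness.
 * Some wakers pass the ready event mask as "key"; a plain wake_up() passes NULL. */
static int my_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
    pr_info("resource ready, key=%p\n", key);
    return 1;
}

static void register_interest(void)
{
    init_waitqueue_head(&my_whead);
    /* Register a callback instead of a sleeping task: this is exactly what
     * epoll does with ep_poll_callback in ep_ptable_queue_proc(). */
    init_waitqueue_func_entry(&my_wait, my_poll_callback);
    add_wait_queue(&my_whead, &my_wait);
}

static void notify_ready(void)
{
    /* The resource side: walks the queue and invokes my_poll_callback(). */
    wake_up(&my_whead);
}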
First, a passage from 《追蹤Linux Tcp/Ip代碼運行:基于2.6內核》 (Tracing the Linux TCP/IP Code, based on the 2.6 kernel):
Think about it: when writing a program, does a programmer define the structures first or write the functions first? There are two possible answers. The first is to write the functions first, and let the needs that emerge from them drive the definition of the structures. The second is to follow a specification, as with the TCP and IP header structures: they are dictated by the protocol, so the structure definitions come first and the functions afterwards. The two answers are opposites, yet if you consider where protocols come from, they too were distilled from practice, which leads to the same conclusion: theory follows practice.
Sometimes we need to put ourselves in the programmer's position to understand why a structure exists and how it is used, reasoning backwards about why it was created and what it is for. This approach not only improves our ability to read and understand code, it also strengthens our logical reasoning, so that we can face any piece of code calmly and confidently.
Structures
/*
* This structure is stored inside the "private_data" member of the file
* structure and rapresent the main data sructure for the eventpoll
* interface.
*/
struct eventpoll {
/* Protect the this structure access */
spinlock_t lock;
/*
* This mutex is used to ensure that files are not removed
* while epoll is using them. This is held during the event
* collection loop, the file cleanup path, the epoll file exit
* code and the ctl operations.
*/
struct mutex mtx;
/* Wait queue used by sys_epoll_wait() */
/* Tasks blocked in epoll_wait() on this epoll instance are linked on this wait queue */
wait_queue_head_t wq;
/* Wait queue used by file->poll() */
/* An epoll file can itself be poll()ed, e.g. monitored by another epoll instance */
wait_queue_head_t poll_wait;
/* List of ready file descriptors */
/* List of epitems that are already ready */
struct list_head rdllist;
/* RB tree root used to store monitored fd structs */
/* Stores the epitems */
struct rb_root rbr;
/*
* This is a single linked list that chains all the "struct epitem" that
* happened while transfering ready events to userspace w/out
* holding ->lock.
*/
/* See the comments in ep_poll_callback() and ep_scan_ready_list() */
struct epitem *ovflist;
/* The user that created the eventpoll descriptor */
/* The user that created this epoll instance */
struct user_struct *user;
};
/*
* Each file descriptor added to the eventpoll interface will
* have an entry of this type linked to the "rbr" RB tree.
*/
struct epitem {
/* RB tree node used to link this structure to the eventpoll RB tree */
/* Node in the eventpoll's internal red-black tree */
struct rb_node rbn;
/* List header used to link this structure to the eventpoll ready list */
/* Every ready epitem gets linked onto the eventpoll's rdllist */
struct list_head rdllink;
/*
* Works together "struct eventpoll"->ovflist in keeping the
* single linked chain of items.
*/
/* Used together with eventpoll->ovflist */
struct epitem *next;
/* The file descriptor information this item refers to */
/* Used as the key of this node in the eventpoll's red-black tree */
struct epoll_filefd ffd;
/* Number of active wait queue attached to poll operations */
/* Number of wait queues this item is hooked into */
/* Can one epitem really be hooked into several wait queues at once?
 * (Yes: a file's poll() may call poll_wait() on more than one queue.) */
int nwait;
/* List containing poll wait queues */
/* Links the eppoll_entry structures belonging to this epitem */
struct list_head pwqlist;
/* The "container" of this item */
/* The eventpoll this epitem belongs to */
struct eventpoll *ep;
/* List header used to link this item to the "struct file" items list */
/* Links this epitem into the monitored struct file's f_ep_links list */
struct list_head fllink;
/* The structure that describe the interested events and the source fd */
/* Data passed from user space via epoll_ctl(); the events this epitem is interested in */
struct epoll_event event;
};
struct epoll_filefd {
struct file *file;
int fd;
};
struct epoll_event {
__u32 events;
__u64 data;
};
/* Wrapper struct used by poll queueing */
struct ep_pqueue {
poll_table pt;
struct epitem *epi;
};
/*
* structures and helpers for f_op->poll implementations
*/
typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_table_struct *);
typedef struct poll_table_struct {
poll_queue_proc qproc;
unsigned long key;
} poll_table;
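For reference, the helper that fills in this poll_table (used later by ep_insert()) looks roughly like this in include/linux/poll.h of this kernel generation; a paraphrase, not an exact copy:
static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc)
{
    pt->qproc = qproc;
    pt->key = ~0UL; /* all events enabled by default */
}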
/* Wait structure used by the poll hooks */
/* Hook structure that gets queued on the monitored file's wait queue */
struct eppoll_entry {
/* List header used to link this structure to the "struct epitem" */
/* Links this entry to its owning epitem */
struct list_head llink;
/* The "base" pointer is set to the container "struct epitem" */
/* Points back to the owning epitem */
/*
 * Since llink already links this entry to its epitem, why is a separate
 * base pointer needed? llink only threads the entry onto epi->pwqlist;
 * when all we have is the eppoll_entry itself (recovered from "wait" in
 * ep_poll_callback()), base is what gets us back to the epitem.
 */
struct epitem *base;
/*
* Wait queue item that will be linked to the target file wait
* queue head.
*/
/* Wait queue node hooked onto the monitored file's wait queue */
wait_queue_t wait;
/* The wait queue head that linked the "wait" wait queue item */
/* Head of the wait queue that "wait" is linked on */
wait_queue_head_t *whead;
};
/* Used by the ep_send_events() function as callback private data */
struct ep_send_events_data {
int maxevents;
struct epoll_event __user *events;
};
Call chains (a minimal user-space usage sketch follows the list):
sys_epoll_create() -> sys_epoll_create1() -> ep_alloc()
-> anon_inode_getfd()
sys_epoll_ctl(EPOLL_CTL_ADD) -> ep_insert() -> f_op->poll() -> poll_wait() -> ep_ptable_queue_proc()
-> ep_rbtree_insert()
-> wake_up
sys_epoll_ctl(EPOLL_CTL_DEL) -> ep_remove() -> ep_unregister_pollwait()
-> rb_erase()
sys_epoll_ctl(EPOLL_CTL_MOD) -> ep_modify() -> f_op->poll()
-> wake_up
sys_epoll_wait() -> ep_poll() -> block
-> ep_send_events() -> ep_scan_ready_list() -> ep_send_events_proc()
-> wake_up
ep_poll_callback() -> wake_up
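Before diving into the kernel side of these call chains, it helps to keep the user-space view in mind. A minimal usage sketch (hypothetical helper, error handling abbreviated):
// sketch: user-space view of the three epoll system calls
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/epoll.h>

static void watch_fd(int fd)
{
    struct epoll_event ev, revents[8];
    int epfd, n, i;

    if ((epfd = epoll_create1(0)) == -1)                 /* sys_epoll_create1() -> ep_alloc() */
        { perror("epoll_create1"); exit(1); }

    ev.events = EPOLLIN;                                 /* the interest set */
    ev.data.fd = fd;
    if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev) == -1)   /* -> ep_insert() */
        { perror("epoll_ctl"); exit(1); }

    if ((n = epoll_wait(epfd, revents, 8, -1)) == -1)    /* -> ep_poll(), may block */
        { perror("epoll_wait"); exit(1); }
    for (i = 0; i < n; i++)
        printf("fd %d ready, events=0x%x\n", revents[i].data.fd, revents[i].events);

    close(epfd);
}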
epoll_create()
SYSCALL_DEFINE1(epoll_create, int, size)
{
if (size <= 0)
return -EINVAL;
/* Delegate to sys_epoll_create1(), which does the real work of creating the epoll instance */
return sys_epoll_create1(0);
}
/*
* Open an eventpoll file descriptor.
*/
SYSCALL_DEFINE1(epoll_create1, int, flags)
{
int error;
struct eventpoll *ep = NULL;
/* Check the EPOLL_* constant for consistency. */
BUILD_BUG_ON(EPOLL_CLOEXEC != O_CLOEXEC);
/* Only the EPOLL_CLOEXEC flag is recognized */
if (flags & ~EPOLL_CLOEXEC)
return -EINVAL;
/*
* Create the internal data structure ("struct eventpoll").
*/
/* Allocate and initialize a struct eventpoll */
error = ep_alloc(&ep);
if (error < 0)
return error;
/*
* Creates all the items needed to setup an eventpoll file. That is,
* a file structure and a free file descriptor.
*/
/*
 * Allocate an (inode, dentry, file) triple from the anon_inode_mnt
 * filesystem, map the file to a file descriptor and install it into the
 * current process's fdtable.
 *
 * anon_inode_mnt has no on-disk image, just as a socket has no real disk
 * file backing it. Files allocated from this anonymous filesystem exist
 * mainly to expose a kernel resource through a file descriptor...
 *
 * After the file structure is allocated, the eventpoll is attached to its
 * private_data member, so that the eventpoll can later be recovered from
 * the file descriptor.
 *
 * The operations the file supports are given by eventpoll_fops; it only
 * supports release and poll, where release() tears down and frees the
 * eventpoll attached to the file when the file itself is destroyed.
 */
error = anon_inode_getfd("[eventpoll]", &eventpoll_fops, ep,
O_RDWR | (flags & O_CLOEXEC));
if (error < 0)
ep_free(ep);
return error;
}
static int ep_alloc(struct eventpoll **pep)
{
int error;
struct user_struct *user;
struct eventpoll *ep;
/* Get the user info of the current user context */
user = get_current_user();
error = -ENOMEM;
/* Allocate a zeroed struct eventpoll with kzalloc() */
ep = kzalloc(sizeof(*ep), GFP_KERNEL);
if (unlikely(!ep))
goto free_uid;
/* Initialization */
spin_lock_init(&ep->lock);
mutex_init(&ep->mtx);
init_waitqueue_head(&ep->wq);
init_waitqueue_head(&ep->poll_wait);
INIT_LIST_HEAD(&ep->rdllist);
/* An empty red-black tree */
ep->rbr = RB_ROOT;
/* Note: EP_UNACTIVE_PTR marks ovflist as "not in use" */
ep->ovflist = EP_UNACTIVE_PTR;
ep->user = user;
*pep = ep;
return 0;
free_uid:
free_uid(user);
return error;
}
epoll_ctl()
/*
* The following function implements the controller interface for
* the eventpoll file that enables the insertion/removal/change of
* file descriptors inside the interest set.
*/
SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
struct epoll_event __user *, event)
{
int error;
struct file *file, *tfile;
struct eventpoll *ep;
struct epitem *epi;
struct epoll_event epds;
error = -EFAULT;
/* Validate arguments and copy the epoll_event from user space into kernel space */
if (ep_op_has_event(op) &&
copy_from_user(&epds, event, sizeof(struct epoll_event)))
goto error_return;
/* Get the "struct file *" for the eventpoll file */
error = -EBADF;
/* Get the struct file corresponding to the eventpoll file descriptor */
file = fget(epfd);
if (!file)
goto error_return;
/* Get the "struct file *" for the target file */
/* Get the struct file corresponding to the target (monitored) file descriptor */
tfile = fget(fd);
if (!tfile)
goto error_fput;
/* The target file descriptor must support poll */
error = -EPERM;
/* The monitored file must support poll() */
if (!tfile->f_op || !tfile->f_op->poll)
goto error_tgt_fput;
/*
* We have to check that the file structure underneath the file descriptor
* the user passed to us _is_ an eventpoll file. And also we do not permit
* adding an epoll file descriptor inside itself.
*/
error = -EINVAL;
/*
 * 1. An epoll instance must not monitor itself, otherwise an event would
 *    trigger an endless notification loop...
 * 2. Verify that the file behind epfd really is an epoll file; the kernel
 *    checks file "types" like this in many places by looking at the
 *    file's operation set.
 */
if (file == tfile || !is_file_epoll(file))
goto error_tgt_fput;
/*
* At this point it is safe to assume that the "private_data" contains
* our own data structure.
*/
/* Retrieve the eventpoll attached to the epoll file */
ep = file->private_data;
/* Take the mutex: protects the epitem from being removed asynchronously while we hold it */
mutex_lock(&ep->mtx);
/*
* Try to lookup the file inside our RB tree, Since we grabbed "mtx"
* above, we can be sure to be able to use the item looked up by
* ep_find() till we release the mutex.
*/
/*
 * The eventpoll stores the monitored epitems in a red-black tree keyed by
 * the (file, fd) pair.
 *
 * ep_find() performs a binary search of that tree for the epitem that
 * corresponds to (file, fd).
 */
epi = ep_find(ep, tfile, fd);
error = -EINVAL;
/* Perform the requested operation op */
/* Note: the ep_insert(), ep_remove() and ep_modify() call chains all run under the mtx lock */
switch (op) {
case EPOLL_CTL_ADD:
if (!epi) {
/* epoll always monitors POLLERR and POLLHUP */
epds.events |= POLLERR | POLLHUP;
error = ep_insert(ep, &epds, tfile, fd);
} else
error = -EEXIST;
break;
case EPOLL_CTL_DEL:
if (epi)
error = ep_remove(ep, epi);
else
error = -ENOENT;
break;
case EPOLL_CTL_MOD:
if (epi) {
/* epoll always monitors POLLERR and POLLHUP */
epds.events |= POLLERR | POLLHUP;
error = ep_modify(ep, epi, &epds);
} else
error = -ENOENT;
break;
}
mutex_unlock(&ep->mtx);
error_tgt_fput:
fput(tfile);
error_fput:
fput(file);
error_return:
return error;
}
/*
* Search the file inside the eventpoll tree. The RB tree operations
* are protected by the "mtx" mutex, and ep_find() must be called with
* "mtx" held.
*/
static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)
{
int kcmp;
struct rb_node *rbp;
struct epitem *epi, *epir = NULL;
struct epoll_filefd ffd;
/* Wrap the (file, fd) pair in an epoll_filefd to form the search key */
ep_set_ffd(&ffd, file, fd);
/* Binary search for the monitored epitem */
for (rbp = ep->rbr.rb_node; rbp; ) {
epi = rb_entry(rbp, struct epitem, rbn);
kcmp = ep_cmp_ffd(&ffd, &epi->ffd);
if (kcmp > 0)
rbp = rbp->rb_right;
else if (kcmp < 0)
rbp = rbp->rb_left;
else {
epir = epi;
break;
}
}
return epir;
}
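The two helpers used above are trivial; roughly, paraphrased from fs/eventpoll.c (keys are ordered by file pointer first, then by fd):
/* Build the (file, fd) key */
static inline void ep_set_ffd(struct epoll_filefd *ffd,
                              struct file *file, int fd)
{
    ffd->file = file;
    ffd->fd = fd;
}

/* Compare RB tree keys: file pointer first, then fd */
static inline int ep_cmp_ffd(struct epoll_filefd *p1,
                             struct epoll_filefd *p2)
{
    return (p1->file > p2->file ? +1 :
           (p1->file < p2->file ? -1 : p1->fd - p2->fd));
}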
/*
* Must be called with "mtx" held.
*/
static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
struct file *tfile, int fd)
{
int error, revents, pwake = 0;
unsigned long flags;
struct epitem *epi;
struct ep_pqueue epq;
/* Check the per-user resource limit */
if (unlikely(atomic_read(&ep->user->epoll_watches) >=
max_user_watches))
return -ENOSPC;
/* Allocate an epitem from the slab cache */
if (!(epi = kmem_cache_alloc(epi_cache, GFP_KERNEL)))
return -ENOMEM;
/* Item initialization follow here ... */
/* Initialize the freshly allocated epitem */
INIT_LIST_HEAD(&epi->rdllink);
INIT_LIST_HEAD(&epi->fllink);
INIT_LIST_HEAD(&epi->pwqlist);
epi->ep = ep;
ep_set_ffd(&epi->ffd, tfile, fd);
epi->event = *event;
epi->nwait = 0;
/* Note: EP_UNACTIVE_PTR means this item is not on the ovflist */
epi->next = EP_UNACTIVE_PTR;
/* Initialize the poll table using the queue callback */
/*
 * Note:
 * The epitem and the poll_table are wrapped together in one structure so
 * that later, when registering with the resource, the epitem can be
 * recovered from the poll_table (via container_of).
 */
/* Attach the epitem to this ep_pqueue */
epq.epi = epi;
/*
 * Initialize the poll_table inside the ep_pqueue:
 * 1. set the queueing (registration) function to ep_ptable_queue_proc
 * 2. set the interesting event mask to "all events"
 *
 * Careful: many blogs, and even the original source comments, call this
 * registration function a "callback". It does not actually deliver any
 * information back, so don't be misled by the name...
 */
init_poll_funcptr(&epq.pt, ep_ptable_queue_proc);
/*
* Attach the item to the poll hooks and get current event bits.
* We can safely use the file* here because its usage count has
* been increased by the caller of this function. Note that after
* this operation completes, the poll callback can start hitting
* the new item.
*/
/* NOTE:
 * For comfortable reading you do not need to follow every detail here;
 * it is enough to know what this call ultimately accomplishes.
 */
/*
 * Only special files such as pipes and sockets support poll(); regular
 * files on ext2/ext3/ext4 and other block-based filesystems do not,
 * because they do not support blocking reads: if there is data it is
 * returned, and if there is none, 0 is returned to mean end-of-file.
 *
 * Take an IPv4 TCP socket as an example:
 * 1. sys_socketcall() -> sys_socket() -> sock_create() ->
 *    __sock_create() -> net_families[PF_INET]->create() ==>
 *    inet_create(): socket->ops = &inet_stream_ops
 * 2. sys_socketcall() -> sys_socket() -> sock_map_fd() ->
 *    sock_alloc_file() -> alloc_file():
 *    file->f_op = &socket_file_ops
 * When a socket is created with socket(PF_INET, SOCK_STREAM, 0), its
 * operation set ops ends up being inet_stream_ops, whose poll is
 * tcp_poll; later, when the socket is bound to a file, the file's f_op
 * is set to socket_file_ops, whose poll is sock_poll.
 *
 * 3. [the code below] tfile->f_op->poll() ==>
 *    socket_file_ops.poll() ==> sock_poll() ->
 *    socket->ops->poll() ==> tcp_poll() ->
 *    sock_poll_wait() -> poll_wait()
 * When we poll() the file that represents the socket, the socket-specific
 * poll() operation is invoked, and via the call chain in step 3 it
 * eventually calls poll_wait().
 *
 * 4. [the code below] poll_wait() -> epq.pt.qproc() ==>
 *    ep_ptable_queue_proc()
 * poll_wait() invokes the proc function of the poll_table we passed in,
 * i.e. the ep_ptable_queue_proc we installed with init_poll_funcptr()
 * in the previous step.
 *
 * So after all this, step 4 is the only crucial one...
 * The kernel is layered in such a complex way for better extensibility.
 */
/*
 * What it ultimately does:
 * it hooks the eventpoll's monitored epitem, wrapped in an eppoll_entry,
 * onto the monitored file's wait queue. Later, when an event becomes
 * ready on that file, the callbacks of all nodes on the queue are
 * invoked, and the watchers are thereby notified...
 */
/* f_op->poll() also returns the file's current readiness state */
revents = tfile->f_op->poll(tfile, &epq.pt);
/*
* We have to check if something went wrong during the poll wait queue
* install process. Namely an allocation for a wait queue failed due
* high memory pressure.
*/
error = -ENOMEM;
if (epi->nwait < 0)
goto error_unregister;
/* Add the current item to the list of active epoll hook for this file */
/* Take the spinlock: protects access to the struct file */
spin_lock(&tfile->f_lock);
/*
 * Link the epitem to the file it monitors.
 * The f_ep_links field of struct file chains all epitems monitoring it.
 */
list_add_tail(&epi->fllink, &tfile->f_ep_links);
spin_unlock(&tfile->f_lock);
/*
* Add the current item to the RB tree. All RB tree operations are
* protected by "mtx", and ep_insert() is called with "mtx" held.
*/
/* Add the epitem to the eventpoll's red-black tree */
/*
 * Why ep->lock is not needed here: the red-black tree is only modified by
 * 1. epoll_ctl() -> ep_insert()
 * 2. epoll_ctl() -> ep_remove()
 * 3. eventpoll_release_file() -> ep_remove()
 * All three paths take ep->mtx before touching the tree, so ep->lock is
 * unnecessary.
 */
ep_rbtree_insert(ep, epi);
/* We have to drop the new item inside our item list to keep track of it */
/* Take the spinlock: protects access to the eventpoll */
spin_lock_irqsave(&ep->lock, flags);
/* If the file is already "ready" we drop it inside the ready list */
/*
 * If the file's current state revents already contains events we care
 * about, link this epitem onto the eventpoll's ready list.
 */
/*
 * The epitem may already have been added to the ready list by an
 * asynchronous ep_poll_callback() call; that is why the
 * !ep_is_linked(&epi->rdllink) check is needed.
 */
if ((revents & event->events) && !ep_is_linked(&epi->rdllink)) {
list_add_tail(&epi->rdllink, &ep->rdllist);
/* Notify waiting tasks that events are available */
/* Wake up tasks blocked in epoll_wait() on this epoll instance */
if (waitqueue_active(&ep->wq))
wake_up_locked(&ep->wq);
/* The epoll file itself is now ready */
if (waitqueue_active(&ep->poll_wait))
pwake++;
}
spin_unlock_irqrestore(&ep->lock, flags);
/* Update the user's count of monitored events */
atomic_inc(&ep->user->epoll_watches);
/* We have to call this outside the lock */
if (pwake)
ep_poll_safewake(&ep->poll_wait);
return 0;
error_unregister:
ep_unregister_pollwait(ep, epi);
/*
* We need to do this because an event could have been arrived on some
* allocated wait queue. Note that we don't care about the ep->ovflist
* list, since that is used/cleaned only inside a section bound by "mtx".
* And ep_insert() is called with "mtx" held.
*/
spin_lock_irqsave(&ep->lock, flags);
if (ep_is_linked(&epi->rdllink))
list_del_init(&epi->rdllink);
spin_unlock_irqrestore(&ep->lock, flags);
kmem_cache_free(epi_cache, epi);
return error;
}
static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p)
{
if (p && wait_address)
p->qproc(filp, wait_address, p);
}
/*
* This is the callback that is used to add our wait queue to the
* target file wakeup lists.
*/
/**
 * ep_ptable_queue_proc - hooks the epitem onto the monitored file's wait queue
 * @file: the monitored file
 * @whead: the monitored file's wait queue head
 * @pt: the poll_table set up in ep_insert()
 */
static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
poll_table *pt)
{
/* Recover the epitem from the poll_table */
struct epitem *epi = ep_item_from_epqueue(pt);
struct eppoll_entry *pwq;
/* Allocate an eppoll_entry from the slab cache and initialize it */
if (epi->nwait >= 0 && (pwq = kmem_cache_alloc(pwq_cache, GFP_KERNEL))) {
/*
 * Initialize a wait queue node whose wakeup function is set to
 * ep_poll_callback.
 *
 * Key point: the wakeup callback is ep_poll_callback!
 */
init_waitqueue_func_entry(&pwq->wait, ep_poll_callback);
/* Also remember whead, the head of the monitored file's wait queue */
pwq->whead = whead;
pwq->base = epi;
/* Hook the eppoll_entry onto the monitored file's wait queue */
add_wait_queue(whead, &pwq->wait);
/*
 * Associate the eppoll_entry with its epitem. A list is used for the
 * linkage; in the common case epitem and eppoll_entry are 1:1, but a
 * file's poll() may call poll_wait() on more than one queue, in which
 * case several entries hang off one epitem.
 */
/* FIXME: I may be misreading this part; corrections are welcome */
list_add_tail(&pwq->llink, &epi->pwqlist);
/* Increment the wait queue count */
epi->nwait++;
} else {
/* We have to signal that an error occurred */
epi->nwait = -1;
}
}
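ep_item_from_epqueue() above, and ep_item_from_wait() used later in ep_poll_callback(), are just container_of() lookups through the wrapper structures; roughly, paraphrased from fs/eventpoll.c:
/* Get the "struct epitem" from a wait queue node ("wait" is embedded in eppoll_entry) */
static inline struct epitem *ep_item_from_wait(wait_queue_t *p)
{
    return container_of(p, struct eppoll_entry, wait)->base;
}

/* Get the "struct epitem" from an epoll queue wrapper ("pt" is embedded in ep_pqueue) */
static inline struct epitem *ep_item_from_epqueue(poll_table *p)
{
    return container_of(p, struct ep_pqueue, pt)->epi;
}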
/*
* Removes a "struct epitem" from the eventpoll RB tree and deallocates
* all the associated resources. Must be called with "mtx" held.
*/
static int ep_remove(struct eventpoll *ep, struct epitem *epi)
{
unsigned long flags;
struct file *file = epi->ffd.file;
/*
* Removes poll wait queue hooks. We _have_ to do this without holding
* the "ep->lock" otherwise a deadlock might occur. This because of the
* sequence of the lock acquisition. Here we do "ep->lock" then the wait
* queue head lock when unregistering the wait queue. The wakeup callback
* will run by holding the wait queue head lock and will call our callback
* that will try to get "ep->lock".
*/
/* Unregister the epitem's hooks from the monitored file */
ep_unregister_pollwait(ep, epi);
/* Remove the current item from the list of epoll hooks */
/* Take the spinlock: protects access to the struct file */
spin_lock(&file->f_lock);
/* Unlink the epitem from the monitored file */
if (ep_is_linked(&epi->fllink))
list_del_init(&epi->fllink);
spin_unlock(&file->f_lock);
/* Remove the node from the eventpoll's red-black tree; ep->lock is not needed */
rb_erase(&epi->rbn, &ep->rbr);
/* Take the spinlock: protects access to the eventpoll */
spin_lock_irqsave(&ep->lock, flags);
/* Remove the epitem from the eventpoll's ready list */
/*
 * An epitem can only sit on ep->ovflist inside the ep->mtx critical
 * section of epoll_wait() -> ep_poll() -> ep_scan_ready_list(), and we
 * hold that mutex here, so there is no need to check epi->next.
 */
if (ep_is_linked(&epi->rdllink))
list_del_init(&epi->rdllink);
spin_unlock_irqrestore(&ep->lock, flags);
/* At this point it is safe to free the eventpoll item */
/* Free the item */
kmem_cache_free(epi_cache, epi);
/* Update the user's count of monitored events */
atomic_dec(&ep->user->epoll_watches);
return 0;
}
/*
* This function unregisters poll callbacks from the associated file
* descriptor. Must be called with "mtx" held (or "epmutex" if called from
* ep_free).
*/
/*
 * Unregister the watch: remove the epitem's eppoll_entry hooks from the
 * monitored file's wait queue and free them.
 */
static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi)
{
/* epi->pwqlist links the epitem with its eppoll_entry structures */
struct list_head *lsthead = &epi->pwqlist;
struct eppoll_entry *pwq;
while (!list_empty(lsthead)) {
/* Get the eppoll_entry */
pwq = list_first_entry(lsthead, struct eppoll_entry, llink);
/* Detach the eppoll_entry from the epitem */
list_del(&pwq->llink);
/* Remove it from the monitored file's wait queue */
remove_wait_queue(pwq->whead, &pwq->wait);
/* Free the entry */
kmem_cache_free(pwq_cache, pwq);
}
}
/*
* Modify the interest event mask by dropping an event if the new mask
* has a match in the current file status. Must be called with "mtx" held.
*/
static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_event *event)
{
int pwake = 0;
unsigned int revents;
/*
* Set the new event interest mask before calling f_op->poll();
* otherwise we might miss an event that happens between the
* f_op->poll() call and the new event set registering.
*/
/* Update the epoll_event */
epi->event.events = event->events;
epi->event.data = event->data; /* protected by mtx */
/*
* Get current event bits. We can safely use the file* here because
* its usage count has been increased by the caller of this function.
*/
/*
 * Since the monitored events have changed, re-read the file's current
 * state and check whether revents contains any of the newly requested
 * events.
 */
revents = epi->ffd.file->f_op->poll(epi->ffd.file, NULL);
/*
* If the item is "hot" and it is not registered inside the ready
* list, push it inside.
*/
/* If the file's current state matches the new interest set, mark it ready and wake up the waiters */
if (revents & event->events) {
/* Take the spinlock: protects access to the eventpoll */
spin_lock_irq(&ep->lock);
/*
 * The epitem may already have been added to the ready list by an
 * asynchronous ep_poll_callback() call; that is why the
 * !ep_is_linked(&epi->rdllink) check is needed.
 */
if (!ep_is_linked(&epi->rdllink)) {
list_add_tail(&epi->rdllink, &ep->rdllist);
/* Notify waiting tasks that events are available */
if (waitqueue_active(&ep->wq))
wake_up_locked(&ep->wq);
if (waitqueue_active(&ep->poll_wait))
pwake++;
}
spin_unlock_irq(&ep->lock);
}
/* We have to call this outside the lock */
if (pwake)
ep_poll_safewake(&ep->poll_wait);
return 0;
}
epoll_wait()
/*
* Implement the event wait interface for the eventpoll file. It is the kernel
* part of the user space epoll_wait(2).
*/
SYSCALL_DEFINE4(epoll_wait, int, epfd, struct epoll_event __user *, events,
int, maxevents, int, timeout)
{
int error;
struct file *file;
struct eventpoll *ep;
/* The maximum number of event must be greater than zero */
/* Validate arguments */
if (maxevents <= 0 || maxevents > EP_MAX_EVENTS)
return -EINVAL;
/* Verify that the area passed by the user is writeable */
/* Verify that the current user can write to the events array */
if (!access_ok(VERIFY_WRITE, events, maxevents * sizeof(struct epoll_event))) {
error = -EFAULT;
goto error_return;
}
/* Get the "struct file *" for the eventpoll file */
error = -EBADF;
/* Get the struct file corresponding to the eventpoll file descriptor */
file = fget(epfd);
if (!file)
goto error_return;
/*
* We have to check that the file structure underneath the fd
* the user passed to us _is_ an eventpoll file.
*/
error = -EINVAL;
/* Verify that the file behind epfd is an epoll file */
if (!is_file_epoll(file))
goto error_fput;
/*
* At this point it is safe to assume that the "private_data" contains
* our own data structure.
*/
/* Retrieve the eventpoll attached to the epoll file */
ep = file->private_data;
/* Time to fish for events ... */
/* Call ep_poll() to wait for events */
error = ep_poll(ep, events, maxevents, timeout);
error_fput:
fput(file);
error_return:
return error;
}
/*
 * Wakeups happen in:
 * 1. ep_insert()
 * 2. ep_modify()
 * 3. ep_poll_callback()
 * 4. ep_poll() -> ep_send_events() -> ep_scan_ready_list()
 */
static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
int maxevents, long timeout)
{
int res, eavail;
unsigned long flags;
long jtimeout;
wait_queue_t wait;
/*
* Calculate the timeout by checking for the "infinite" value (-1)
* and the overflow condition. The passed timeout is in milliseconds,
* that why (t * HZ) / 1000.
*/
/* Compute the sleep time: convert milliseconds to jiffies */
jtimeout = (timeout < 0 || timeout >= EP_MAX_MSTIMEO) ?
MAX_SCHEDULE_TIMEOUT : (timeout * HZ + 999) / 1000;
retry:
/* Take the spinlock: protects access to the eventpoll */
spin_lock_irqsave(&ep->lock, flags);
res = 0;
/* The ready list is empty: no events are ready yet */
if (list_empty(&ep->rdllist)) {
/*
* We don't have any available event to return to the caller.
* We need to sleep here, and we will be wake up by
* ep_poll_callback() when events will become available.
*/
/* Initialize a wait queue node and mark it as an exclusive waiter */
init_waitqueue_entry(&wait, current);
wait.flags |= WQ_FLAG_EXCLUSIVE;
/* Hook the freshly initialized node onto the eventpoll's wait queue */
__add_wait_queue(&ep->wq, &wait);
for (;;) {
/*
* We don't want to sleep if the ep_poll_callback() sends us
* a wakeup in between. That's why we set the task state
* to TASK_INTERRUPTIBLE before doing the checks.
*/
/* Set the task state to TASK_INTERRUPTIBLE, because we want to receive
 * wakeups from ep_insert(), ep_modify() and ep_poll_callback()
 */
set_current_state(TASK_INTERRUPTIBLE);
/* Events are ready or the timeout has expired: leave the loop */
if (!list_empty(&ep->rdllist) || !jtimeout)
break;
/* A signal is pending: set the return value to -EINTR and leave the loop */
if (signal_pending(current)) {
res = -EINTR;
break;
}
spin_unlock_irqrestore(&ep->lock, flags);
/* Sleep... until the timeout expires or a ready resource wakes us up */
jtimeout = schedule_timeout(jtimeout);
spin_lock_irqsave(&ep->lock, flags);
}
/* Remove ourselves from the wait queue */
__remove_wait_queue(&ep->wq, &wait);
/* Set the task state back to running */
set_current_state(TASK_RUNNING);
}
/* Is it worth to try to dig for events ? */
/* Check whether any resource is ready */
eavail = !list_empty(&ep->rdllist) || ep->ovflist != EP_UNACTIVE_PTR;
spin_unlock_irqrestore(&ep->lock, flags);
/*
* Try to transfer events to user space. In case we get 0 events and
* there's still timeout left over, we go trying again in search of
* more luck.
*/
/*
 * If we were not interrupted (!res) and some resource is ready (eavail),
 * deliver the ready events to user space with ep_send_events().
 * If ep_send_events() delivers 0 events and there is still timeout left
 * (jtimeout), retry, hoping not to come back empty-handed...
 */
if (!res && eavail &&
!(res = ep_send_events(ep, events, maxevents)) && jtimeout)
goto retry;
return res;
}
static int ep_send_events(struct eventpoll *ep,
struct epoll_event __user *events, int maxevents)
{
struct ep_send_events_data esed;
/* Note: the events array and the maximum count maxevents are packed together */
esed.maxevents = maxevents;
esed.events = events;
/* Note: the event delivery routine is ep_send_events_proc */
return ep_scan_ready_list(ep, ep_send_events_proc, &esed);
}
/**
* ep_scan_ready_list - Scans the ready list in a way that makes possible for
* the scan code, to call f_op->poll(). Also allows for
* O(NumReady) performance.
*
* @ep: Pointer to the epoll private data structure.
* @sproc: Pointer to the scan callback.
* @priv: Private opaque data passed to the @sproc callback.
*
* Returns: The same integer error code returned by the @sproc callback.
*/
static int ep_scan_ready_list(struct eventpoll *ep,
int (*sproc)(struct eventpoll *,
struct list_head *, void *),
void *priv)
{
int error, pwake = 0;
unsigned long flags;
struct epitem *epi, *nepi;
/* Initialize a local list head */
LIST_HEAD(txlist);
/*
* We need to lock this because we could be hit by
* eventpoll_release_file() and epoll_ctl().
*/
/* Take the mutex */
mutex_lock(&ep->mtx);
/*
* Steal the ready list, and re-init the original one to the
* empty list. Also, set ep->ovflist to NULL so that events
* happening while looping w/out locks, are not lost. We cannot
* have the poll callback to queue directly on ep->rdllist,
* because we want the "sproc" callback to be able to do it
* in a lockless way.
*/
/* Take the spinlock: protects access to the eventpoll */
spin_lock_irqsave(&ep->lock, flags);
/*
 * Splice every node on the eventpoll's ready list onto txlist; afterwards
 * the eventpoll's ready list is empty.
 */
list_splice_init(&ep->rdllist, &txlist);
/* Set eventpoll.ovflist so that events becoming ready from now on are
 * queued on eventpoll.ovflist instead of the ready list */
ep->ovflist = NULL;
spin_unlock_irqrestore(&ep->lock, flags);
/*
* Now call the callback function.
*/
/*
 * sproc ==> ep_send_events_proc
 * priv wraps the events array and the maximum count maxevents
 *
 * Note: ep_send_events_proc() runs only inside the ep->mtx critical section
 */
error = (*sproc)(ep, &txlist, priv);
/* Take the spinlock again: protects access to the eventpoll */
spin_lock_irqsave(&ep->lock, flags);
/*
* During the time we spent inside the "sproc" callback, some
* other events might have been queued by the poll callback.
* We re-insert them inside the main ready-list here.
*/
/*
 * While ep_send_events_proc() was delivering the ready list to user
 * space, newly ready events were queued on eventpoll.ovflist. Walk
 * eventpoll.ovflist and re-attach those ready epitems to the ready list,
 * where they will wait for the next epoll_wait() to deliver them...
 */
for (nepi = ep->ovflist; (epi = nepi) != NULL;
nepi = epi->next, epi->next = EP_UNACTIVE_PTR) {
/*
* We need to check if the item is already in the list.
* During the "sproc" callback execution time, items are
* queued into ->ovflist but the "txlist" might already
* contain them, and the list_splice() below takes care of them.
*/
/* See the original comment above for why ep_is_linked(&epi->rdllink) is checked */
if (!ep_is_linked(&epi->rdllink))
list_add_tail(&epi->rdllink, &ep->rdllist);
}
/*
* We need to set back ep->ovflist to EP_UNACTIVE_PTR, so that after
* releasing the lock, events will be queued in the normal way inside
* ep->rdllist.
*/
/* Reset eventpoll.ovflist so that newly ready events go back onto the
 * ready list rather than the ovflist */
ep->ovflist = EP_UNACTIVE_PTR;
/*
* Quickly re-inject items left on "txlist".
*/
/* Splice whatever ep_send_events_proc() left undelivered on txlist back
 * onto the eventpoll's ready list */
list_splice(&txlist, &ep->rdllist);
/*
 * Note that epoll_wait() marks its wait_queue_t as an exclusive waiter,
 * so each wakeup wakes only one waiter. At this point we have delivered
 * as much of the ready list to the user as we could, but delivery may
 * have been incomplete (1. an error occurred during delivery, 2. LT mode
 * re-queued items), and new events may have arrived in the meantime. In
 * other words, some ready resources remain after this epoll_wait() call,
 * so we wake one more waiter and let another user enjoy them...
 *
 * This already shows the kernel's answer to the epoll thundering herd,
 * namely ET mode:
 * 1. only one waiter is woken at a time
 * 2. after delivery the event is not re-queued onto the ready list
 */
if (!list_empty(&ep->rdllist)) {
/*
* Wake up (if active) both the eventpoll wait list and
* the ->poll() wait list (delayed after we release the lock).
*/
/* Wake up tasks blocked in epoll_wait() on this epoll instance */
if (waitqueue_active(&ep->wq))
wake_up_locked(&ep->wq);
/* The epoll file itself is now ready */
if (waitqueue_active(&ep->poll_wait))
pwake++;
}
spin_unlock_irqrestore(&ep->lock, flags);
mutex_unlock(&ep->mtx);
/* We have to call this outside the lock */
if (pwake)
ep_poll_safewake(&ep->poll_wait);
return error;
}
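Why does an exclusive waiter get woken alone? The wakeup path walks the wait queue and stops after the first exclusive entry whose wakeup function returns nonzero. A close paraphrase of __wake_up_common() from kernel/sched.c of this kernel generation (not an exact copy):
static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
                             int nr_exclusive, int wake_flags, void *key)
{
    wait_queue_t *curr, *next;

    list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
        unsigned flags = curr->flags;

        /* curr->func is default_wake_function for ordinary sleepers,
         * or ep_poll_callback for epoll's hooks */
        if (curr->func(curr, mode, wake_flags, key) &&
            (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
            break;
    }
}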
static int ep_send_events_proc(struct eventpoll *ep, struct list_head *head,
void *priv)
{
struct ep_send_events_data *esed = priv;
int eventcnt;
unsigned int revents;
struct epitem *epi;
struct epoll_event __user *uevent;
/*
* We can loop without lock because we are passed a task private list.
* Items cannot vanish during the loop because ep_scan_ready_list() is
* holding "mtx" during this call.
*/
/*
 * Walk the head ready list.
 *
 * eventcnt counts the events delivered so far.
 * uevent points into the events array wrapped in esed, which is used to
 * return the ready events to the user.
 */
for (eventcnt = 0, uevent = esed->events;
!list_empty(head) && eventcnt < esed->maxevents;) {
epi = list_first_entry(head, struct epitem, rdllink);
/* Unlink the epitem from the head ready list */
list_del_init(&epi->rdllink);
/* Extract the events we care about from the file's current state */
revents = epi->ffd.file->f_op->poll(epi->ffd.file, NULL) &
epi->event.events;
/*
* If the event mask intersect the caller-requested one,
* deliver the event to userspace. Again, ep_scan_ready_list()
* is holding "mtx", so no operations coming from userspace
* can change the item.
*/
/* If any event we care about has occurred */
if (revents) {
/*
 * Copy the events to user space.
 *
 * If the copy fails, put the epitem back at the head of the ready list
 * and return the number of events delivered so far. The caller,
 * ep_scan_ready_list(), will splice the head ready list back onto the
 * eventpoll's ready list, to be delivered by the next
 * epoll_wait() -> ep_poll() -> ep_send_events()...
 */
if (__put_user(revents, &uevent->events) ||
__put_user(epi->event.data, &uevent->data)) {
/* The copy failed... */
list_add(&epi->rdllink, head);
return eventcnt ? eventcnt : -EFAULT;
}
/* Update the count of delivered events */
eventcnt++;
/* Advance to the next slot in the events array */
uevent++;
if (epi->event.events & EPOLLONESHOT)
epi->event.events &= EP_PRIVATE_BITS;
else if (!(epi->event.events & EPOLLET)) {
/*
* If this file has been added with Level
* Trigger mode, we need to insert back inside
* the ready list, so that the next call to
* epoll_wait() will check again the events
* availability. At this point, noone can insert
* into ep->rdllist besides us. The epoll_ctl()
* callers are locked out by
* ep_scan_ready_list() holding "mtx" and the
* poll callback will queue them in ep->ovflist.
*/
/*
 * LT mode: as long as the resource is in a matching state, the events
 *          are delivered to the user again.
 * ET mode: events are delivered only when the resource's state changes.
 *
 * In LT mode, after delivering the events to the user the epitem is put
 * back onto the eventpoll's ready list. The next epoll_wait() then goes
 * straight into ep_send_events_proc() without sleeping, re-reads the
 * file's current state and compares it with the events we care about:
 * 1. If the state still matches (either the resource became ready again,
 *    or the previously ready data has not been fully consumed), the item
 *    is re-queued and delivered again;
 * 2. If it no longer matches (the previously ready data has been consumed
 *    and nothing new is ready), the item stays off the ready list after
 *    being unlinked...
 *
 * Regarding point 2, some blogs claim this can make epoll_wait() return 0
 * and spin once for nothing, but testing shows it does not... let's trace
 * the kernel to see why...
 *
 * e.g.
 * Suppose the epoll instance monitors a single listen socket and exactly
 * one connection arrives. epoll_wait() is woken up, delivers the event to
 * the user, re-queues the epitem onto the ready list, and returns to user
 * space. The second epoll_wait() enters ep_send_events_proc() immediately
 * without sleeping and hits the situation described in point 2. Since the
 * eventpoll holds only this one item, the ready list is exhausted with
 * eventcnt == 0, and we unwind ep_send_events_proc() ->
 * ep_scan_ready_list() -> ep_send_events() -> ep_poll().
 * Now the comment at the end of ep_poll() makes sense: with timeout left,
 * ep_poll() simply goes back to sleep instead of returning 0.
 */
list_add_tail(&epi->rdllink, &ep->rdllist);
}
}
}
return eventcnt;
}
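The practical consequence of the LT/ET distinction on the user-space side: under ET an event is reported once per state change, so the reader must drain the fd until EAGAIN before going back to epoll_wait(). A minimal sketch (hypothetical helper, nonblocking fd assumed):
// sketch: draining a nonblocking fd after an EPOLLET notification
#include <errno.h>
#include <stdio.h>
#include <unistd.h>

static void drain_fd(int fd)
{
    char buf[4096];
    ssize_t n;

    for (;;) {
        n = read(fd, buf, sizeof(buf));
        if (n > 0)
            continue;                    /* process buf[0..n) here, then keep reading */
        if (n == 0)
            break;                       /* peer closed the connection */
        if (errno == EAGAIN || errno == EWOULDBLOCK)
            break;                       /* fully drained: wait for the next edge */
        if (errno == EINTR)
            continue;
        perror("read");                  /* real error */
        break;
    }
}
Under LT a single read() per wakeup would also work, because the kernel re-queues the epitem as long as the condition still holds; under ET the loop above is what prevents data from being left unread with no further notification.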
ep_poll_callback()
/*
* This is the callback that is passed to the wait queue wakeup
* machanism. It is called by the stored file descriptors when they
* have events to report.
*/
/**
 * ep_poll_callback - wakeup callback; links the ready epitem onto the ready
 * list of its owning eventpoll and wakes up the watchers
 * @wait: eppoll_entry.wait
 * @mode:
 * @key: carries the resource's current state (event mask)
 */
static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
int pwake = 0;
unsigned long flags;
/* Recover the epitem from the wait member embedded in the eppoll_entry */
struct epitem *epi = ep_item_from_wait(wait);
/* Get the eventpoll the epitem belongs to */
struct eventpoll *ep = epi->ep;
/* Take the spinlock: protects access to the eventpoll */
/*
 * Note: ep_poll_callback() uses only the spinlock, because it may be
 * invoked from the resource's interrupt handler when the resource becomes
 * ready, and interrupt context must not sleep, so the sleeping mutex
 * cannot be used for synchronization here.
 *
 * Since ep->mtx is not taken, it looks as if there could be a race: the
 * epitem obtained by ep_item_from_wait() might be removed concurrently.
 * In practice this callback runs with the wait queue head's lock held,
 * and ep_unregister_pollwait() must take that same lock in
 * remove_wait_queue() before ep_remove() frees the epitem, so the two
 * paths are serialized.
 */
spin_lock_irqsave(&ep->lock, flags);
/*
* If the event mask does not contain any poll(2) event, we consider the
* descriptor to be disabled. This condition is likely the effect of the
* EPOLLONESHOT bit that disables the descriptor when an event is received,
* until the next EPOLL_CTL_MOD will be issued.
*/
/* If the interest set is empty, there is nothing to do when the file becomes ready */
if (!(epi->event.events & ~EP_PRIVATE_BITS))
goto out_unlock;
/*
* Check the events coming with the callback. At this stage, not
* every device reports the events in the "key" parameter of the
* callback. We need to be able to handle both cases here, hence the
* test for "key" != NULL before the event match test.
*/
/* Check whether the file's current state in key contains any event we care about */
if (key && !((unsigned long) key & epi->event.events))
goto out_unlock;
/*
* If we are trasfering events to userspace, we can hold no locks
* (because we're accessing user memory, and because of linux f_op->poll()
* semantics). All the events that happens during that period of time are
* chained in ep->ovflist and requeued later on.
*/
/*
 * While ep_send_events_proc() is delivering the ready list to user space
 * (i.e. while ep->ovflist != EP_UNACTIVE_PTR), newly ready events must be
 * queued on eventpoll.ovflist instead.
 */
/* FIXME: I could not find a definitive explanation of ovflist and once
 * suspected it was redundant; corrections welcome. A plausible reason,
 * matching the original comment in ep_scan_ready_list(): sproc walks and
 * re-splices rdllist without holding ep->lock, so this callback must not
 * touch rdllist concurrently, and ovflist gives it a safe side channel. */
if (unlikely(ep->ovflist != EP_UNACTIVE_PTR)) {
if (epi->next == EP_UNACTIVE_PTR) {
epi->next = ep->ovflist;
ep->ovflist = epi;
}
goto out_unlock;
}
/* If this file is already in the ready list we exit soon */
/* If the epitem is not already on its eventpoll's ready list, append it to the tail */
/*
 * If a ready event is already on the eventpoll's ready list and has not
 * yet been processed and unlinked, it need not be queued again when the
 * event becomes ready once more... that is why the
 * !ep_is_linked(&epi->rdllink) check is needed.
 */
if (!ep_is_linked(&epi->rdllink))
list_add_tail(&epi->rdllink, &ep->rdllist);
/*
* Wake up ( if active ) both the eventpoll wait list and the ->poll()
* wait list.
*/
/* Wake up tasks blocked in epoll_wait() on this epoll instance */
if (waitqueue_active(&ep->wq))
wake_up_locked(&ep->wq);
/* The epoll file itself is now ready */
if (waitqueue_active(&ep->poll_wait))
pwake++;
out_unlock:
spin_unlock_irqrestore(&ep->lock, flags);
/* We have to call this outside the lock */
if (pwake)
ep_poll_safewake(&ep->poll_wait);
return 1;
}
Verifying that ET mode avoids the epoll thundering herd
// server.c
#include <stdio.h>
#include <string.h>
#include <err.h>
#include <errno.h>
#include <pthread.h>
#include <unistd.h>
#include <sys/epoll.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#define NR_THREAD 5
int listenfd, epollfd;
static void *thrd_func(void *args)
{
int connfd, retval;
socklen_t addrlen;
struct sockaddr_in cliaddr;
struct epoll_event revent;
if ((retval = epoll_wait(epollfd, &revent, 1, -1)) == -1)
err(-1, "thread: %ld: epoll_wait: %d", (long)pthread_self(), __LINE__);
fprintf(stderr, "thread: %ld: epoll_wait() return %d\n", (long)pthread_self(), retval);
addrlen = sizeof(struct sockaddr_in);
while (accept(listenfd, (struct sockaddr *)&cliaddr, &addrlen) == -1) {
if (errno == EAGAIN) {
warn("thread: %ld: accept: %d", (long)pthread_self(), __LINE__);
sleep(1);
continue;
}
err(-1, "thread: %ld: epoll_wait: %d", (long)pthread_self(), __LINE__);
}
fprintf(stderr, "thread: %ld: accept a connection: %s:%d\n", (long)pthread_self(),
inet_ntoa(cliaddr.sin_addr), ntohs(cliaddr.sin_port));
pthread_exit(NULL);
}
int main(int argc, char *argv[])
{
int i;
pthread_t threads[NR_THREAD];
struct sockaddr_in servaddr;
struct epoll_event ev;
if ((listenfd = socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0)) == -1)
err(-1, "socket: %d", __LINE__);
memset(&servaddr, 0, sizeof(struct sockaddr_in));
servaddr.sin_family = AF_INET;
servaddr.sin_addr.s_addr = htonl(INADDR_ANY);
servaddr.sin_port = htons(10240);
if (bind(listenfd, (struct sockaddr *)&servaddr, sizeof(struct sockaddr_in)) == -1)
err(-1, "bind: %d", __LINE__);
if (listen(listenfd, 0) == -1)
err(-1, "listen: %d", __LINE__);
if ((epollfd = epoll_create1(0)) == -1)
err(-1, "epoll_create1: %d", __LINE__);
ev.events = EPOLLIN;
#ifdef ET
ev.events |= EPOLLET;
#endif
ev.data.fd = listenfd;
if (epoll_ctl(epollfd, EPOLL_CTL_ADD, listenfd, &ev) == -1)
err(-1, "epoll_ctl: %d", __LINE__);
for (i = 0; i != NR_THREAD; ++i) {
if ((errno = pthread_create(&threads[i], NULL, &thrd_func, NULL)) != 0)
err(-1, "pthread_create: %d", __LINE__);
}
for (i = 0; i != NR_THREAD; ++i) {
if ((errno = pthread_join(threads[i], NULL)) != 0)
err(-1, "pthread_join: %d", __LINE__);
}
return 0;
}
The server creates NR_THREAD threads, each calling epoll_wait() to monitor the listen socket. The program has no practical value: this concurrency model could simply block in accept(), and using I/O multiplexing here only adds unnecessary overhead... see UNIX Network Programming, Volume 1: The Sockets Networking API (section 30.6, TCP preforked server, no locking around accept).
// client.c
#include <stdio.h>
#include <string.h>
#include <err.h>
#include <errno.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <netinet/in.h>
int main(int argc, char *argv[])
{
int connfd;
struct sockaddr_in servaddr;
if ((connfd = socket(AF_INET, SOCK_STREAM, 0)) == -1)
err(-1, "socket: %d", __LINE__);
memset(&servaddr, 0, sizeof(struct sockaddr_in));
servaddr.sin_family = AF_INET;
servaddr.sin_addr.s_addr = inet_addr("127.0.0.1");
servaddr.sin_port = htons(10240);
if (connect(connfd, (struct sockaddr *)&servaddr, sizeof(struct sockaddr_in)) == -1)
err(-1, "connect: %d", __LINE__);
fprintf(stderr, "ok\n");
return 0;
}
The client just connects to the server and exits... (how exciting)
# The client runs are not shown; from the server's output you can infer when each client was started
[Asu@Zombie epoll]$ cc server.c -o server -lpthread
[Asu@Zombie epoll]$ cc client.c -o client
[Asu@Zombie epoll]$ ./server
thread: 140478631536384: epoll_wait() return 1
thread: 140478656714496: epoll_wait() return 1
thread: 140478648321792: epoll_wait() return 1
thread: 140478631536384: accept a connection: 127.0.0.1:38976
thread: 140478656714496: accept: 32: Resource temporarily unavailable
thread: 140478648321792: accept: 32: Resource temporarily unavailable
thread: 140478639929088: epoll_wait() return 1
thread: 140478639929088: accept: 32: Resource temporarily unavailable
thread: 140478639929088: accept: 32: Resource temporarily unavailable
thread: 140478656714496: accept: 32: Resource temporarily unavailable
thread: 140478648321792: accept: 32: Resource temporarily unavailable
thread: 140478639929088: accept: 32: Resource temporarily unavailable
thread: 140478648321792: accept: 32: Resource temporarily unavailable
thread: 140478656714496: accept: 32: Resource temporarily unavailable
thread: 140478665107200: epoll_wait() return 1
thread: 140478665107200: accept a connection: 127.0.0.1:38978
thread: 140478648321792: accept: 32: Resource temporarily unavailable
thread: 140478656714496: accept: 32: Resource temporarily unavailable
thread: 140478639929088: accept: 32: Resource temporarily unavailable
thread: 140478656714496: accept: 32: Resource temporarily unavailable
thread: 140478648321792: accept: 32: Resource temporarily unavailable
thread: 140478639929088: accept: 32: Resource temporarily unavailable
thread: 140478656714496: accept a connection: 127.0.0.1:38980
thread: 140478648321792: accept: 32: Resource temporarily unavailable
thread: 140478639929088: accept: 32: Resource temporarily unavailable
thread: 140478648321792: accept: 32: Resource temporarily unavailable
thread: 140478639929088: accept: 32: Resource temporarily unavailable
thread: 140478639929088: accept a connection: 127.0.0.1:38982
thread: 140478648321792: accept: 32: Resource temporarily unavailable
thread: 140478648321792: accept: 32: Resource temporarily unavailable
thread: 140478648321792: accept a connection: 127.0.0.1:38984
[Asu@Zombie epoll]$
# Note: here server.c was compiled with -DET, i.e. the listen socket is monitored in EPOLLET mode
# The epoll thundering herd is completely gone, but ET mode still has its own pitfalls... it is less forgiving...
[Asu@Zombie epoll]$ cc server.c -o server -lpthread -DET
[Asu@Zombie epoll]$ cc client.c -o client
[Asu@Zombie epoll]$ ./server
thread: 139991003125504: epoll_wait() return 1
thread: 139991003125504: accept a connection: 127.0.0.1:38990
thread: 139991011518208: epoll_wait() return 1
thread: 139991011518208: accept a connection: 127.0.0.1:38992
thread: 139991019910912: epoll_wait() return 1
thread: 139991019910912: accept a connection: 127.0.0.1:38994
thread: 139991028303616: epoll_wait() return 1
thread: 139991028303616: accept a connection: 127.0.0.1:38996
thread: 139991036696320: epoll_wait() return 1
thread: 139991036696320: accept a connection: 127.0.0.1:38998
[Asu@Zombie epoll]$
The Linux kernel's answer to the epoll thundering herd is to wake only one waiter, but because LT mode re-queues the epitem onto the ready list, the thundering herd in LT mode is not actually eliminated...
Solving the accept thundering herd
/*
 * Early Linux kernels did not solve the accept thundering herd, so users
 * had to handle it themselves: take a lock before accept() and release it
 * afterwards, ensuring that at any moment only one thread/process blocks
 * in accept().
 * Modern Linux kernels solve the accept thundering herd; the approach is
 * essentially what we do below, except that the exclusion is built into
 * the accept() path itself (the listening socket's wait is an exclusive
 * wait, so only one sleeper is woken per connection)...
 */
#include <stdio.h>
#include <string.h>
#include <err.h>
#include <errno.h>
#include <unistd.h>
#include <pthread.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#define NR_THREAD 5
int listenfd;
pthread_mutex_t mtx;
static void *thrd_func(void *args)
{
int connfd, retval;
socklen_t addrlen;
struct sockaddr_in cliaddr;
int error_flg = -1;
if ((errno = pthread_mutex_lock(&mtx)) != 0)
err(-1, "thread: %ld: pthread_mutex_lock: %d", (long)pthread_self(), __LINE__);
addrlen = sizeof(struct sockaddr_in);
if (accept(listenfd, (struct sockaddr *)&cliaddr, &addrlen) == -1)
error_flg = __LINE__;
if ((errno = pthread_mutex_unlock(&mtx)) != 0)
err(-1, "thread: %ld: pthread_mutex_unlock: %d", (long)pthread_self(), __LINE__);
if (error_flg != -1)
err(-1, "thread: %ld: accept: %d", (long)pthread_self(), error_flg);
fprintf(stderr, "thread: %ld: accept a connection: %s:%d\n", (long)pthread_self(),
inet_ntoa(cliaddr.sin_addr), ntohs(cliaddr.sin_port));
pthread_exit(NULL);
}
int main(int argc, char *argv[])
{
int i;
pthread_t threads[NR_THREAD];
struct sockaddr_in servaddr;
if ((listenfd = socket(AF_INET, SOCK_STREAM, 0)) == -1)
err(-1, "socket: %d", __LINE__);
memset(&servaddr, 0, sizeof(struct sockaddr_in));
servaddr.sin_family = AF_INET;
servaddr.sin_addr.s_addr = htonl(INADDR_ANY);
servaddr.sin_port = htons(10240);
if (bind(listenfd, (struct sockaddr *)&servaddr, sizeof(struct sockaddr_in)) == -1)
err(-1, "bind: %d", __LINE__);
if (listen(listenfd, 0) == -1)
err(-1, "listen: %d", __LINE__);
if ((errno = pthread_mutex_init(&mtx, NULL)) != 0)
err(-1, "pthread_mutex_init: %d", __LINE__);
for (i = 0; i != NR_THREAD; ++i) {
if ((errno = pthread_create(&threads[i], NULL, &thrd_func, NULL)) != 0)
err(-1, "pthread_create: %d", __LINE__);
}
for (i = 0; i != NR_THREAD; ++i) {
if ((errno = pthread_join(threads[i], NULL)) != 0)
err(-1, "pthread_join: %d", __LINE__);
}
return 0;
}