简介
Redis Bio 就是Background I/O service for Redis.
它把那些耗时的 IO 操作放到后台线程来执行，主线程专注于服务请求，避免耗时的释放、写入等操作造成服务等待。
实现也特别简单：新建处理线程，使用临界区投放任务的方式完成操作。
核心的一些定义和方法
/* Per-job-type background I/O state: one slot per job type (BIO_NUM_OPS).
 * Index j holds all the state needed by the worker thread of type j. */
static pthread_t bio_threads[BIO_NUM_OPS]; /* Worker thread handles. */
static pthread_mutex_t bio_mutex[BIO_NUM_OPS]; /* Protects the job list and pending counter of each type. */
static pthread_cond_t bio_newjob_cond[BIO_NUM_OPS]; /* Signaled when a new job is queued. */
static pthread_cond_t bio_step_cond[BIO_NUM_OPS]; /* Broadcast by the worker after each completed job. */
static list *bio_jobs[BIO_NUM_OPS]; /* FIFO queue of pending bio_job structures, per type. */
static unsigned long long bio_pending[BIO_NUM_OPS]; /* Jobs queued but not yet fully processed, per type. */
/* A single background job. The job type is implicit: it is determined by
 * which queue (bio_jobs[type]) the job is pushed onto. */
struct bio_job {
time_t time; /* Time at which the job was created. */
/* Job specific arguments pointers. If we need to pass more than three
 * arguments we can just pass a pointer to a structure or alike. */
void *arg1, *arg2, *arg3; /* Interpretation depends on the job type. */
};
初始化bio
/* Initialize the background system, spawning one worker thread per job
 * type. Must be called once at startup, before any job is queued.
 *
 * Fix over the original: the pthread_attr_t object was initialized with
 * pthread_attr_init() but never released; POSIX requires a matching
 * pthread_attr_destroy() once the attribute object is no longer needed. */
void bioInit(void) {
    pthread_attr_t attr;
    pthread_t thread;
    size_t stacksize;
    int j;

    /* Initialization of state vars and objects. */
    for (j = 0; j < BIO_NUM_OPS; j++) {
        pthread_mutex_init(&bio_mutex[j],NULL);
        pthread_cond_init(&bio_newjob_cond[j],NULL);
        pthread_cond_init(&bio_step_cond[j],NULL);
        bio_jobs[j] = listCreate(); /* Empty job queue for this type. */
        bio_pending[j] = 0;
    }

    /* Set the stack size as by default it may be small in some system. */
    pthread_attr_init(&attr);
    pthread_attr_getstacksize(&attr,&stacksize);
    if (!stacksize) stacksize = 1; /* The world is full of Solaris Fixes */
    /* Double until we reach at least the configured minimum. */
    while (stacksize < REDIS_THREAD_STACK_SIZE) stacksize *= 2;
    pthread_attr_setstacksize(&attr, stacksize);

    /* Ready to spawn our threads. We use the single argument the thread
     * function accepts in order to pass the job ID the thread is
     * responsible of. */
    for (j = 0; j < BIO_NUM_OPS; j++) {
        void *arg = (void*)(unsigned long) j;
        if (pthread_create(&thread,&attr,bioProcessBackgroundJobs,arg) != 0) {
            serverLog(LL_WARNING,"Fatal: Can't initialize Background Jobs.");
            exit(1);
        }
        bio_threads[j] = thread;
    }

    /* The attribute object is no longer needed once all threads exist. */
    pthread_attr_destroy(&attr);
}
整个创建过程是一个标准的初始化过程，没有什么特别的地方。
线程函数
/* Worker thread entry point. 'arg' encodes the job type (an index
 * < BIO_NUM_OPS) this thread is responsible for. Loops forever popping
 * jobs of that type from bio_jobs[type] and executing them. The loop
 * invariant is that bio_mutex[type] is held at the top of each iteration
 * and released while the job itself is being processed. */
void *bioProcessBackgroundJobs(void *arg) {
struct bio_job *job;
unsigned long type = (unsigned long) arg;
sigset_t sigset;

/* Check that the type is within the right interval. */
if (type >= BIO_NUM_OPS) {
serverLog(LL_WARNING,
"Warning: bio thread started with wrong type %lu",type);
return NULL;
}

/* Make the thread killable at any time, so that bioKillThreads()
 * can work reliably. */
pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); /* Cancel takes effect immediately, not at cancellation points. */

pthread_mutex_lock(&bio_mutex[type]); /* Establish the loop invariant: lock held on entry. */
/* Block SIGALRM so we are sure that only the main thread will
 * receive the watchdog signal. */
sigemptyset(&sigset);
sigaddset(&sigset, SIGALRM);
if (pthread_sigmask(SIG_BLOCK, &sigset, NULL))
serverLog(LL_WARNING,
"Warning: can't mask SIGALRM in bio.c thread: %s", strerror(errno));

while(1) {
listNode *ln;

/* The loop always starts with the lock hold. */
if (listLength(bio_jobs[type]) == 0) {
/* cond_wait atomically releases the mutex while blocking and
 * re-acquires it on wakeup; 'continue' re-checks the queue to
 * guard against spurious wakeups. */
pthread_cond_wait(&bio_newjob_cond[type],&bio_mutex[type]);
continue;
}
/* Pop the job from the queue. */
ln = listFirst(bio_jobs[type]);
job = ln->value;
/* It is now possible to unlock the background system as we now have
 * a stand alone job structure to process: producers may queue new
 * jobs while we work on this one. */
pthread_mutex_unlock(&bio_mutex[type]);

/* Process the job accordingly to its type. */
if (type == BIO_CLOSE_FILE) {
close((long)job->arg1);
} else if (type == BIO_AOF_FSYNC) {
aof_fsync((long)job->arg1);
} else if (type == BIO_LAZY_FREE) {
/* What we free changes depending on what arguments are set:
 * arg1 -> free the object at pointer.
 * arg2 & arg3 -> free two dictionaries (a Redis DB).
 * only arg3 -> free the skiplist. */
if (job->arg1)
lazyfreeFreeObjectFromBioThread(job->arg1);
else if (job->arg2 && job->arg3)
lazyfreeFreeDatabaseFromBioThread(job->arg2,job->arg3);
else if (job->arg3)
lazyfreeFreeSlotsMapFromBioThread(job->arg3);
} else {
serverPanic("Wrong job type in bioProcessBackgroundJobs().");
}
zfree(job);

/* Unblock threads blocked on bioWaitStepOfType() if any. */
pthread_cond_broadcast(&bio_step_cond[type]);

/* Lock again before reiterating the loop, if there are no longer
 * jobs to process we'll block again in pthread_cond_wait(). */
pthread_mutex_lock(&bio_mutex[type]);
listDelNode(bio_jobs[type],ln); /* Job is only removed AFTER completion, so bio_pending stays accurate for observers. */
bio_pending[type]--;
}
}
整理流程：
1. 获取锁，判断任务队列是否为空：为空则进入 cond_wait 等待；否则取出一个任务，根据任务类型执行对应操作。
2. 执行结束后唤醒所有等待 step_cond 的线程，再次获取锁，删除已完成的任务，进入下一轮循环。
创建一个任务
/* Queue a new background job of the given 'type'. The three opaque
 * argument pointers are stored as-is; their meaning depends on the job
 * type (see bioProcessBackgroundJobs). Safe to call from the main thread
 * while workers are running. */
void bioCreateBackgroundJob(int type, void *arg1, void *arg2, void *arg3) {
    struct bio_job *new_job = zmalloc(sizeof(*new_job));

    new_job->time = time(NULL); /* Creation timestamp. */
    new_job->arg1 = arg1;
    new_job->arg2 = arg2;
    new_job->arg3 = arg3;

    /* Publish the job under the lock, then wake a worker waiting for work. */
    pthread_mutex_lock(&bio_mutex[type]);
    listAddNodeTail(bio_jobs[type],new_job);
    bio_pending[type]++;
    pthread_cond_signal(&bio_newjob_cond[type]);
    pthread_mutex_unlock(&bio_mutex[type]);
}
关闭后台 IO 线程
/* Kill the running bio threads in an unclean way: cancel each worker and
 * join it. Used only when it is critical to stop the threads quickly
 * (e.g. before a fork, or at shutdown).
 *
 * Fix over the original: the log message emitted when pthread_join()
 * FAILS read "can be joined" — inverted wording for the error path; it
 * must be "can not be joined" (fixed upstream in Redis). */
void bioKillThreads(void) {
    int err, j;

    for (j = 0; j < BIO_NUM_OPS; j++) {
        /* Only try to join threads whose cancel request succeeded. */
        if (pthread_cancel(bio_threads[j]) == 0) {
            if ((err = pthread_join(bio_threads[j],NULL)) != 0) {
                serverLog(LL_WARNING,
                    "Bio thread for job type #%d can not be joined: %s",
                    j, strerror(err));
            } else {
                serverLog(LL_WARNING,
                    "Bio thread for job type #%d terminated",j);
            }
        }
    }
}
一些状态的获取
unsigned long long bioWaitStepOfType(int type) {
unsigned long long val;
pthread_mutex_lock(&bio_mutex[type]);//加鎖
val = bio_pending[type];
if (val != 0) {//
pthread_cond_wait(&bio_step_cond[type],&bio_mutex[type]);//d等待通知
val = bio_pending[type];
}
pthread_mutex_unlock(&bio_mutex[type]);//釋放鎖
return val;
}
unsigned long long bioPendingJobsOfType(int type) {
unsigned long long val;
pthread_mutex_lock(&bio_mutex[type]);
val = bio_pending[type];
pthread_mutex_unlock(&bio_mutex[type]);
return val;
}