Overview
1. Principle
The Deadline scheduler weighs several properties of each request when deciding what to dispatch: it tries to keep block-device accesses in ascending sector order while also ensuring that no request sits in the queue so long that it starves. Imagine an application repeatedly accessing one part of a file while another request targets a distant region of the disk; without a deadline, that far-away request could go unserved for a long time, which is clearly unreasonable. To balance these two goals the Deadline scheduler maintains four queues, grouped into two kinds, each kind with a read queue and a write queue. One kind orders requests by starting sector and is organized as red-black trees, called sort_list; the other orders requests by the time they entered the scheduler (i.e., by deadline) and is organized as linked lists, called fifo_list.
2. Batching
Once a transfer direction (read or write) has been chosen, a batch of consecutive requests is dispatched from the corresponding sort_list into the request_queue's dispatch queue; how many requests make up a batch is bounded by fifo_batch. Only the following three situations end a batch (a minimal sketch of the check follows the list):
1. The corresponding sort_list has run out of requests.
2. The next request does not continue in ascending sector order, i.e., there is no higher-sectored request to follow the last one dispatched.
3. The previous request was already the last one allowed in the batch (the fifo_batch quota has been used up).
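These conditions correspond to the batch-continuation test at the top of deadline_dispatch_requests(), which is analyzed in full in the implementation section. A minimal paraphrase of that test (a sketch only, not verbatim kernel code, using the struct deadline_data fields introduced below):

static bool deadline_batch_continues(struct deadline_data *dd)
{
    struct request *rq;

    /* the cached "next sequential request" from the last dispatch, if any */
    rq = dd->next_rq[WRITE] ? dd->next_rq[WRITE] : dd->next_rq[READ];

    /* the batch goes on only while such a request exists and the
     * fifo_batch quota has not yet been used up */
    return rq && dd->batching < dd->fifo_batch;
}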
3. Deadlines and dispatch
Every request is stamped with a deadline (derived from jiffies) when it enters the scheduler and is kept sorted by that deadline in fifo_list. The default deadline is 500 ms for reads and 5 s for writes, so the kernel clearly favors reads. It goes further than that: the scheduler also keeps a starved counter and a writes_starved threshold (default 2), which can be thought of as the write-starvation line. The kernel always prefers to service reads; starved counts how many read batches have been dispatched in a row, and only once starved exceeds writes_starved are writes considered. As a result, even a write whose deadline has already expired is not necessarily serviced right away: the current read batch must finish first, and even then writes only get a chance once starved has passed writes_starved. Why does the kernel favor reads? For overall performance. Reads are synchronous with respect to the application: a process must wait for the data it reads before it can continue, so pending reads block processes. Writes are different: once an application has issued a write, it rarely matters to the program exactly when the data reaches the block device, so the scheduler services reads first.
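For reference, the defaults for these tunables are defined at the top of block/deadline-iosched.c; the values below are taken from kernels of this era and may differ in other versions:

/* defaults from block/deadline-iosched.c (kernels of this era; may vary) */
static const int read_expire = HZ / 2;  /* max time before a read is submitted (~500 ms) */
static const int write_expire = 5 * HZ; /* same for writes; these limits are soft */
static const int writes_starved = 2;    /* max times reads can starve a write */
static const int fifo_batch = 16;       /* # of sequential requests treated as one batch */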
4. Implementation of the Deadline Scheduler
Let's now walk through the Deadline Scheduler with the actual code. First, the data structure the scheduler works with:
struct deadline_data {
    /*
     * run time data
     */
    /*
     * requests (deadline_rq s) are present on both sort_list and fifo_list
     */
    struct rb_root sort_list[2];
    struct list_head fifo_list[2];
    /*
     * next in sort order. read, write or both are NULL
     */
    struct request *next_rq[2];
    unsigned int batching;      /* number of sequential requests made */
    sector_t last_sector;       /* head position */
    unsigned int starved;       /* times reads have starved writes */
    /*
     * settings that change how the i/o scheduler behaves
     */
    int fifo_expire[2];
    int fifo_batch;
    int writes_starved;
    int front_merges;
};
sort_list: red-black trees of read/write requests, sorted by starting sector
fifo_list: linked lists of read/write requests, sorted by expiry deadline
next_rq: the next read (or write) request; while a batch is running, the next request to dispatch is fetched directly through this pointer
batching: number of requests dispatched in the current batch so far
last_sector: ending sector number of the last request handled
starved: how many read batches have been dispatched since the last write batch
fifo_expire: expiry times for read and write requests
fifo_batch: number of requests per batch
writes_starved: the write-starvation line; after writes_starved read batches, writes must be serviced
front_merges: whether front-merge checking is enabled
The definition of the Deadline Scheduler's elevator_type:
static struct elevator_type iosched_deadline = {
    .ops = {
        .elevator_merge_fn = deadline_merge,
        .elevator_merged_fn = deadline_merged_request,
        .elevator_merge_req_fn = deadline_merged_requests,
        .elevator_dispatch_fn = deadline_dispatch_requests,
        .elevator_add_req_fn = deadline_add_request,
        .elevator_queue_empty_fn = deadline_queue_empty,
        .elevator_former_req_fn = elv_rb_former_request,
        .elevator_latter_req_fn = elv_rb_latter_request,
        .elevator_init_fn = deadline_init_queue,
        .elevator_exit_fn = deadline_exit_queue,
    },
    .elevator_attrs = deadline_attrs,
    .elevator_name = "deadline",
    .elevator_owner = THIS_MODULE,
};
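The deadline_attrs table referenced in .elevator_attrs exposes the scheduler's tunables through sysfs (under /sys/block/<dev>/queue/iosched/). A simplified sketch of its definition, based on deadline-iosched.c of this era (the DD_ATTR() macro, omitted here, generates the show/store handlers for each entry):

static struct elv_fs_entry deadline_attrs[] = {
    DD_ATTR(read_expire),
    DD_ATTR(write_expire),
    DD_ATTR(writes_starved),
    DD_ATTR(front_merges),
    DD_ATTR(fifo_batch),
    __ATTR_NULL
};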
The initialization function deadline_init_queue() simply fills struct deadline_data with its default values, so there is not much to say about it; a minimal sketch is given below. After that we look at deadline_merge(), which checks whether a bio can be merged into an existing request.
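A sketch of that initialization, based on deadline-iosched.c of this era (allocation details differ slightly between kernel versions):

static void *deadline_init_queue(struct request_queue *q)
{
    struct deadline_data *dd;

    dd = kmalloc_node(sizeof(*dd), GFP_KERNEL | __GFP_ZERO, q->node);
    if (!dd)
        return NULL;

    INIT_LIST_HEAD(&dd->fifo_list[READ]);
    INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
    dd->sort_list[READ] = RB_ROOT;
    dd->sort_list[WRITE] = RB_ROOT;
    dd->fifo_expire[READ] = read_expire;    /* default 500 ms */
    dd->fifo_expire[WRITE] = write_expire;  /* default 5 s */
    dd->writes_starved = writes_starved;    /* default 2 */
    dd->front_merges = 1;                   /* front merging enabled by default */
    dd->fifo_batch = fifo_batch;            /* default 16 */
    return dd;
}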
static int
deadline_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
    struct deadline_data *dd = q->elevator->elevator_data;
    struct request *__rq;
    int ret;
    /*
     * check for front merge
     */
    if (dd->front_merges) { //only check for a front merge if front_merges is enabled in the deadline scheduler
        sector_t sector = bio->bi_sector + bio_sectors(bio); //the first sector after the bio's data
        //look in the red-black tree for a request whose starting sector equals sector
        __rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
        if (__rq) { //found one
            BUG_ON(sector != blk_rq_pos(__rq));
            if (elv_rq_merge_ok(__rq, bio)) { //check the remaining attributes to confirm the bio may be merged
                ret = ELEVATOR_FRONT_MERGE; //record the merge type
                goto out;
            }
        }
    }
    return ELEVATOR_NO_MERGE;
out:
    *req = __rq;
    return ret; //report the result back to the generic block layer
}
Some readers may wonder why only a front merge is checked here and not a back merge, even though back merges are far more likely (files are normally accessed from low sectors to high sectors). Precisely because back merges are so much more common, the generic block layer checks for them itself before calling into the scheduler: back-merge lookups go through the elevator's hash table, which I covered in the part on submitting I/O requests.
Note: while analyzing the scheduler's callbacks, keep referring back to the generic-layer function __make_request().
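To make the two merge directions concrete, here is a small illustration with hypothetical sector numbers (field names follow the bio/request API of this kernel era):

/*
 * Hypothetical example: an existing request rq covers sectors 100..107,
 * so blk_rq_pos(rq) == 100 and rq_end_sector(rq) == 108.
 *
 *   back merge (checked by the generic layer via the elevator hash):
 *       a new bio covering 108..115 has bio->bi_sector == 108, which
 *       matches rq's end sector, so the bio is appended to rq.
 *
 *   front merge (checked here via elv_rb_find() on dd->sort_list):
 *       a new bio covering 92..99 satisfies
 *       bio->bi_sector + bio_sectors(bio) == 100 == blk_rq_pos(rq),
 *       so the bio is prepended to rq and rq's starting sector changes,
 *       which is why the request must then be re-inserted into the tree.
 */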
The function deadline_merged_request() does the follow-up work after the bio has been merged. The main concern is that a front merge changes the request's starting sector, i.e., the key of its red-black tree node, so the node has to be removed and re-inserted.
static void deadline_merged_request(struct request_queue *q,
                                    struct request *req, int type)
{
    struct deadline_data *dd = q->elevator->elevator_data;
    /*
     * if the merge was a front merge, we need to reposition request
     */
    if (type == ELEVATOR_FRONT_MERGE) { //the bio was inserted at the front of the request's bio list, so the request must be repositioned
        elv_rb_del(deadline_rb_root(dd, req), req); //remove the request from the red-black tree
        deadline_add_rq_rb(dd, req); //re-insert it under its new starting sector
    }
}
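The two helpers used above are tiny; a sketch based on deadline-iosched.c (some kernel versions also resolve red-black-tree alias collisions inside deadline_add_rq_rb(), which is omitted here):

static inline struct rb_root *
deadline_rb_root(struct deadline_data *dd, struct request *rq)
{
    /* pick the read or write tree according to the request's direction */
    return &dd->sort_list[rq_data_dir(rq)];
}

static void
deadline_add_rq_rb(struct deadline_data *dd, struct request *rq)
{
    /* insert the request, keyed by its starting sector */
    elv_rb_add(deadline_rb_root(dd, rq), rq);
}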
After the generic layer has merged two requests, deadline_merged_requests() cleans up. Note that a request-to-request merge always keeps the front request (req) and discards the back one (next).
static void
deadline_merged_requests(struct request_queue *q, struct request *req,
                         struct request *next)
{
    /*
     * if next expires before rq, assign its expire time to rq
     * and move into next position (next will be deleted) in fifo
     */
    /* Both requests must still be on a fifo list. Then compare the two
       deadlines: if next expires earlier than req, req inherits next's
       deadline and takes next's position in the fifo, because next is
       about to be deleted. */
    if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
        //next's deadline is earlier than req's
        if (time_before(rq_fifo_time(next), rq_fifo_time(req))) {
            list_move(&req->queuelist, &next->queuelist); //move req to next's position in the fifo
            rq_set_fifo_time(req, rq_fifo_time(next)); //give req next's (earlier) deadline
        }
    }
    /*
     * kill knowledge of next, this one is a goner
     */
    //remove next from the fifo list and the red-black tree
    deadline_remove_request(q, next);
}
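deadline_remove_request(), called above and again on every dispatch, unlinks a request from both scheduler structures; a sketch based on deadline-iosched.c of this era:

static inline void
deadline_del_rq_rb(struct deadline_data *dd, struct request *rq)
{
    const int data_dir = rq_data_dir(rq);

    /* if this request was cached as the next sequential one,
     * advance the cache to its successor in the tree */
    if (dd->next_rq[data_dir] == rq)
        dd->next_rq[data_dir] = deadline_latter_request(rq);

    elv_rb_del(deadline_rb_root(dd, rq), rq);
}

static void deadline_remove_request(struct request_queue *q, struct request *rq)
{
    struct deadline_data *dd = q->elevator->elevator_data;

    rq_fifo_clear(rq);          /* unlink from fifo_list */
    deadline_del_rq_rb(dd, rq); /* unlink from sort_list */
}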
deadline_add_request() adds a new request to the scheduler: it inserts the request into both data structures and stamps it with its deadline.
static void
deadline_add_request(struct request_queue *q, struct request *rq)
{
    struct deadline_data *dd = q->elevator->elevator_data;
    const int data_dir = rq_data_dir(rq); //read or write direction of the request
    deadline_add_rq_rb(dd, rq); //insert rq into the red-black tree
    /*
     * set expire time and add to fifo list
     */
    //deadline = current jiffies plus the per-direction expiry
    rq_set_fifo_time(rq, jiffies + dd->fifo_expire[data_dir]);
    //append rq to the fifo_list, which therefore stays sorted by deadline
    list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]);
}
Finally, the most important function of all: deadline_dispatch_requests(), which decides how the scheduler picks a request and dispatches it to the request_queue.
static int deadline_dispatch_requests(struct request_queue *q, int force)
{
    struct deadline_data *dd = q->elevator->elevator_data;
    //whether the read and write fifos have pending requests
    const int reads = !list_empty(&dd->fifo_list[READ]);
    const int writes = !list_empty(&dd->fifo_list[WRITE]);
    struct request *rq;
    int data_dir;
    /*
     * batches are currently reads XOR writes
     */
    //if a "next sequential request" is cached in next_rq, it is the candidate to dispatch
    if (dd->next_rq[WRITE])
        rq = dd->next_rq[WRITE];
    else
        rq = dd->next_rq[READ];
    //a candidate exists and the batch count is still below fifo_batch: keep batching
    if (rq && dd->batching < dd->fifo_batch)
        /* we have a next request are still entitled to batch */
        goto dispatch_request;
    /*
     * at this point we are not running a batch. select the appropriate
     * data direction (read / write)
     */
    /* No batch is in progress, so the scheduler must weigh its options and
       choose the next request, starting with the direction: reads or writes. */
    if (reads) { //the read fifo is not empty
        BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));
        //if the write fifo is also non-empty and starved has already reached
        //writes_starved -- i.e., writes_starved read batches have been dispatched
        //in a row and writes have hit their starvation line -- dispatch writes instead
        if (writes && (dd->starved++ >= dd->writes_starved))
            goto dispatch_writes;
        data_dir = READ; //the new batch will be reads
        goto dispatch_find_request;
    }
    /*
     * there are either no reads or writes have been starved
     */
    if (writes) {
dispatch_writes: //reached when there are no reads, or writes are starved and must be serviced
        BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE]));
        dd->starved = 0; //reset the starvation counter
        data_dir = WRITE; //the new batch will be writes
        goto dispatch_find_request;
    }
    return 0;
dispatch_find_request: //pick the first request of the new batch
    /*
     * we are not running a batch, find best request for selected data_dir
     */
    /* If jiffies has passed the deadline of the oldest request in the chosen
       fifo, or there is no cached next request in this direction (e.g., the
       previous batch went the other way), start from the fifo and take the
       request with the earliest deadline. A typical case is a flood of reads
       starving the writes. */
    if (deadline_check_fifo(dd, data_dir) || !dd->next_rq[data_dir]) {
        /*
         * A deadline has expired, the last request was in the other
         * direction, or we have run out of higher-sectored requests.
         * Start again from the request with the earliest expiry time.
         */
        rq = rq_entry_fifo(dd->fifo_list[data_dir].next);
    } else { //otherwise preserve sector continuity and continue from the last dispatched request
        /*
         * The last req was the same dir and we have a next request in
         * sort order. No expired requests so continue on from here.
         */
        rq = dd->next_rq[data_dir];
    }
    dd->batching = 0;
dispatch_request:
    /*
     * rq is the selected appropriate request.
     */
    dd->batching++;
    deadline_move_request(dd, rq);
    return 1;
}
deadline_check_fifo() checks whether jiffies has passed the deadline of the oldest request in the given fifo.
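Its body is only a few lines; a sketch based on deadline-iosched.c of this era (whether the comparison is time_after or time_after_eq varies between versions):

static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
{
    /* the oldest request in this direction sits at the head of the fifo */
    struct request *rq = rq_entry_fifo(dd->fifo_list[ddir].next);

    /*
     * rq is expired!
     */
    if (time_after(jiffies, rq_fifo_time(rq)))
        return 1;

    return 0;
}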
deadline_move_request() records which request should follow the one being dispatched (so the next call can continue the batch), then calls deadline_move_to_dispatch() to perform the actual dispatch.
static void
deadline_move_request(struct deadline_data *dd, struct request *rq)
{
    const int data_dir = rq_data_dir(rq);
    /* clear both cached next requests first */
    dd->next_rq[READ] = NULL;
    dd->next_rq[WRITE] = NULL;
    //cache the next request in this direction; deadline_latter_request()
    //returns the node just above rq in the red-black tree
    dd->next_rq[data_dir] = deadline_latter_request(rq);
    dd->last_sector = rq_end_sector(rq);
    /*
     * take it off the sort and fifo list, move
     * to dispatch queue
     */
    deadline_move_to_dispatch(dd, rq);
}
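deadline_latter_request() is just a thin wrapper around the red-black tree iterator; a sketch from deadline-iosched.c of this era:

static inline struct request *
deadline_latter_request(struct request *rq)
{
    /* in-order successor of rq in sort_list, i.e., the request with the
     * next-higher starting sector */
    struct rb_node *node = rb_next(&rq->rb_node);

    if (node)
        return rb_entry_rq(node);

    return NULL;
}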
static inline void
deadline_move_to_dispatch(struct deadline_data *dd, struct request *rq)
{
    struct request_queue *q = rq->q;
    deadline_remove_request(q, rq); //take rq off fifo_list and sort_list
    elv_dispatch_add_tail(q, rq); //append it to the owning request_queue's dispatch queue
}
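elv_dispatch_add_tail() lives in the generic elevator layer (block/elevator.c); for completeness, a simplified sketch of what it does in kernels of this era:

void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
{
    if (q->last_merge == rq)
        q->last_merge = NULL;

    elv_rqhash_del(q, rq);      /* no longer a back-merge candidate */

    q->nr_sorted--;

    q->end_sector = rq_end_sector(rq);
    q->boundary_rq = rq;
    list_add_tail(&rq->queuelist, &q->queue_head); /* onto the dispatch queue */
}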
This concludes the analysis of the Deadline Scheduler!