Linux I/O Scheduler--Deadline

Overview

1. Principles

The Deadline scheduler weighs several properties of a request against each other when scheduling, trying both to keep block-device accesses sequential by sector and to ensure that no request sits in the queue so long that it starves. Imagine an application that keeps accessing one part of a file while a single request for a distant region is pending: that distant request could go unanswered for a very long time, which is clearly unreasonable. To balance the two goals, the Deadline scheduler introduces four queues, in two groups, each group consisting of a read queue and a write queue. One group orders requests by starting sector number and is organized as red-black trees, called sort_list; the other orders requests by creation time and is organized as linked lists, called fifo_list.

2. Batching

Once a transfer direction (read or write) has been chosen, a batch of sector-contiguous requests is dispatched from the corresponding sort_list into the request_queue's dispatch queue; the batch size is bounded by fifo_batch. Only the following three situations end a batch:

1. The corresponding sort_list has run out of requests.

2. The next request's start sector does not continue the ascending order.

3. The previous request was already the last one the batch quota allows; a sketch of this combined check follows the list.
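Putting the three conditions together, the end-of-batch test can be sketched like this. This is an illustrative fragment rather than verbatim kernel code; it assumes the struct deadline_data fields shown in section 4, and batch_must_end() is a hypothetical helper name:

static int batch_must_end(struct deadline_data *dd, struct request *next)
{
	if (!next)					/* 1. the sort_list has no more requests */
		return 1;
	if (blk_rq_pos(next) < dd->last_sector)		/* 2. next rq breaks the ascending sector order */
		return 1;
	if (dd->batching >= dd->fifo_batch)		/* 3. the previous rq used up the batch quota */
		return 1;
	return 0;
}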

3. Deadlines and dispatch

Every request is stamped with a deadline when it is created (based on jiffies) and sorted by that deadline in fifo_list. The default deadline length is 500 ms for reads and 5 s for writes, so the kernel clearly favors reads. It goes further than that: the deadline scheduler also keeps a counter, starved, and a threshold, writes_starved (default 2), which can be read as the write-starvation line. The kernel always prefers to service reads; starved counts how many read batches have been dispatched in a row, and only once starved exceeds writes_starved are writes considered. Consequently, even a write whose deadline has already expired is not necessarily serviced right away: the current read batch has to finish first, and even then writes only get a turn once starved has passed writes_starved. Why does the kernel favor reads? For overall performance. Reads are synchronous with respect to the application: a process must wait for the data it asked for before it can do anything else, so a pending read blocks it. Writes are different: once a write has been issued, it matters little to the program exactly when the data reaches the block device, so the scheduler services reads first.
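The defaults mentioned above are plain constants near the top of block/deadline-iosched.c (values from the 2.6-era sources this article follows):

static const int read_expire = HZ / 2;	/* max delay before a read is serviced: 500 ms */
static const int write_expire = 5 * HZ;	/* writes are far less time-critical: 5 s */
static const int writes_starved = 2;	/* max read batches before a write batch must run */
static const int fifo_batch = 16;	/* number of requests dispatched per batch */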

4. Implementation of the Deadline Scheduler

Let us now walk through the actual Deadline Scheduler code. First, the data structures the scheduler works with:

struct deadline_data {
	/*
	 * run time data
	 */

	/*
	 * requests (deadline_rq s) are present on both sort_list and fifo_list
	 */
	struct rb_root sort_list[2];	
	struct list_head fifo_list[2];

	/*
	 * next in sort order. read, write or both are NULL
	 */
	struct request *next_rq[2];
	unsigned int batching;		/* number of sequential requests made */
	sector_t last_sector;		/* head position */
	unsigned int starved;		/* times reads have starved writes */

	/*
	 * settings that change how the i/o scheduler behaves
	 */
	int fifo_expire[2];
	int fifo_batch;
	int writes_starved;
	int front_merges;
};


sort_list: red-black trees of read/write requests, ordered by starting sector

fifo_list: linked lists of read/write requests, ordered by expiry deadline

next_rq: the next read (write) request; while a batch is running, the next request is taken directly through this pointer

batching: number of requests dispatched in the current batch so far

last_sector: end sector number of the last request handled (the disk-head position)

starved: how many read batches in a row have been dispatched, i.e. how long writes have been starved

fifo_expire: the deadline lengths for reads and writes

fifo_batch: number of requests per batch

writes_starved: the write-starvation line; after writes_starved read batches, writes must be serviced

front_merges: whether front-merge checking is enabled
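All of these tunables are exported through sysfs, so they can be inspected (and tuned) at run time. A minimal user-space sketch, assuming a disk named sda whose active scheduler is deadline (adjust the device name for your system):

#include <stdio.h>

int main(void)
{
	/* sysfs attribute names of the deadline scheduler's tunables */
	static const char *attrs[] = {
		"read_expire", "write_expire",
		"writes_starved", "front_merges", "fifo_batch",
	};
	char path[128], buf[64];
	unsigned int i;

	for (i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++) {
		snprintf(path, sizeof(path),
			 "/sys/block/sda/queue/iosched/%s", attrs[i]);
		FILE *f = fopen(path, "r");
		if (!f)
			continue;	/* no such disk, or deadline not selected */
		if (fgets(buf, sizeof(buf), f))
			printf("%s = %s", attrs[i], buf);	/* expire values are in ms */
		fclose(f);
	}
	return 0;
}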

The definition of the Deadline Scheduler itself:

static struct elevator_type iosched_deadline = {
	.ops = {
		.elevator_merge_fn = 		deadline_merge,
		.elevator_merged_fn =		deadline_merged_request,
		.elevator_merge_req_fn =	deadline_merged_requests,
		.elevator_dispatch_fn =		deadline_dispatch_requests,
		.elevator_add_req_fn =		deadline_add_request,
		.elevator_queue_empty_fn =	deadline_queue_empty,
		.elevator_former_req_fn =	elv_rb_former_request,
		.elevator_latter_req_fn =	elv_rb_latter_request,
		.elevator_init_fn =		deadline_init_queue,
		.elevator_exit_fn =		deadline_exit_queue,
	},

	.elevator_attrs = deadline_attrs,
	.elevator_name = "deadline",
	.elevator_owner = THIS_MODULE,
};


The init function deadline_init_queue() simply fills a struct deadline_data with the defaults listed earlier; there is not much to say about it. Roughly, in kernels of this era:
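static void *deadline_init_queue(struct request_queue *q)
{
	struct deadline_data *dd;

	dd = kmalloc_node(sizeof(*dd), GFP_KERNEL | __GFP_ZERO, q->node);
	if (!dd)
		return NULL;

	INIT_LIST_HEAD(&dd->fifo_list[READ]);
	INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
	dd->sort_list[READ] = RB_ROOT;
	dd->sort_list[WRITE] = RB_ROOT;
	dd->fifo_expire[READ] = read_expire;
	dd->fifo_expire[WRITE] = write_expire;
	dd->writes_starved = writes_starved;
	dd->front_merges = 1;
	dd->fifo_batch = fifo_batch;

	return dd;
}

Now look at deadline_merge(), which checks whether a bio can be merged into an existing request: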

static int
deadline_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct request *__rq;
	int ret;

	/*
	 * check for front merge
	 */
	if (dd->front_merges) {//front merges are only checked when the scheduler has front_merges enabled
		sector_t sector = bio->bi_sector + bio_sectors(bio);//the first sector after this bio's data

		//look in the rb-tree for a request whose start sector equals `sector`
		__rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
		if (__rq) {//found one
			BUG_ON(sector != blk_rq_pos(__rq));

			if (elv_rq_merge_ok(__rq, bio)) {//attribute checks confirm the bio can be merged
				ret = ELEVATOR_FRONT_MERGE;//flag a front merge
				goto out;
			}
		}
	}

	return ELEVATOR_NO_MERGE;
out:
	*req = __rq;
	return ret;//report the result to the generic layer
}


Some readers may wonder why only a front merge is checked here and not a back merge, even though a back merge is far more likely (files are typically accessed from low sectors to high sectors). Precisely because back merges are so much more common, the generic block layer has already checked for them before calling into the scheduler; the back-merge lookup goes through the elevator's hash table, which was covered in the part on submitting I/O requests.
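For reference, both merge cases reduce to simple sector arithmetic. The two helpers below are hypothetical names, written only to make the conditions explicit:

/* back merge (handled by the generic layer via the elevator hash):
 * the bio begins exactly where the request ends */
static int can_back_merge(struct request *rq, struct bio *bio)
{
	return blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_sector;
}

/* front merge (handled here): the bio ends exactly where the request
 * begins, which is why deadline_merge() looks up the rb-tree by the
 * bio's end sector, bio->bi_sector + bio_sectors(bio) */
static int can_front_merge(struct request *rq, struct bio *bio)
{
	return bio->bi_sector + bio_sectors(bio) == blk_rq_pos(rq);
}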

Note: while analyzing the scheduler's callbacks, keep the generic-layer function __make_request() close at hand; it drives most of them.

The function deadline_merged_request() does the bookkeeping after a bio has been merged. Its main concern is that a front merge changes the request's starting sector, which is its red-black tree key, so the node has to be deleted and re-inserted:

static void deadline_merged_request(struct request_queue *q,
				    struct request *req, int type)
{
	struct deadline_data *dd = q->elevator->elevator_data;

	/*
	 * if the merge was a front merge, we need to reposition request
	 */
	if (type == ELEVATOR_FRONT_MERGE) {//a front merge changed the rq's start sector, so it must be repositioned
		elv_rb_del(deadline_rb_root(dd, req), req);//delete the request from the rb-tree
		deadline_add_rq_rb(dd, req);//re-insert it under its new key
	}
}

After the generic layer has merged two requests, deadline_merged_requests() handles the cleanup. Note that the merge always keeps the earlier request (req) and discards the later one (next):

static void
deadline_merged_requests(struct request_queue *q, struct request *req,
			 struct request *next)
{
	/*
	 * if next expires before rq, assign its expire time to rq
	 * and move into next position (next will be deleted) in fifo
	 */
	 /*Both requests must still be queued on a fifo. Then compare the two
	   deadlines: if next expires before req, req inherits next's deadline and
	   moves into next's position in the fifo, because next is about to be
	   deleted.*/
	if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
		//next's deadline is earlier than req's
		if (time_before(rq_fifo_time(next), rq_fifo_time(req))) {
			list_move(&req->queuelist, &next->queuelist);//move req into next's fifo slot
			rq_set_fifo_time(req, rq_fifo_time(next));//req inherits next's deadline
		}
	}

	/*
	 * kill knowledge of next, this one is a goner
	 */
	 //remove next from both the fifo list and the rb-tree
	deadline_remove_request(q, next);
}



deadline_add_request() adds a new request to the scheduler: it inserts the request into both data structures and stamps its deadline:

static void
deadline_add_request(struct request_queue *q, struct request *rq)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const int data_dir = rq_data_dir(rq);//direction (read or write) of this request

	deadline_add_rq_rb(dd, rq);//insert rq into the rb-tree

	/*
	 * set expire time and add to fifo list
	 */
	//stamp the deadline
	rq_set_fifo_time(rq, jiffies + dd->fifo_expire[data_dir]);
	//append rq to the fifo_list
	list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]);
}
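deadline_add_rq_rb() is a thin wrapper around the elevator rb-tree helpers; in kernels of this era it looks roughly like the following. elv_rb_add() returns an already-present request with the same sector key (an alias), which is simply dispatched to get it out of the way:

static void
deadline_add_rq_rb(struct deadline_data *dd, struct request *rq)
{
	struct rb_root *root = deadline_rb_root(dd, rq);
	struct request *__alias;

	while (unlikely(__alias = elv_rb_add(root, rq)))
		deadline_move_request(dd, __alias);
}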


The last function to analyze is also the most important one: deadline_dispatch_requests(), which is how the scheduler selects a request and dispatches it to the request_queue:

static int deadline_dispatch_requests(struct request_queue *q, int force)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	//note whether the read and write fifos hold any requests
	const int reads = !list_empty(&dd->fifo_list[READ]);
	const int writes = !list_empty(&dd->fifo_list[WRITE]);
	struct request *rq;
	int data_dir;

	/*
	 * batches are currently reads XOR writes
	 */
	//if a next_rq is cached, it is the candidate for the next dispatch
	if (dd->next_rq[WRITE])
		rq = dd->next_rq[WRITE];
	else
		rq = dd->next_rq[READ];

	//a candidate exists and the batch count is still below fifo_batch: keep batching
	if (rq && dd->batching < dd->fifo_batch)
		/* we have a next request and are still entitled to batch */
		goto dispatch_request;

	/*
	 * at this point we are not running a batch. select the appropriate
	 * data direction (read / write)
	 */

	/*Reaching this point means no candidate is cached, so the scheduler has to
	  weigh its options: first pick a direction, i.e. reads or writes*/

	if (reads) {//the read fifo is not empty
		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));

		//if the write fifo is not empty either, and starved has reached
		//writes_starved -- i.e. starved consecutive read batches have already
		//been served, crossing the write-starvation line -- dispatch a write
		if (writes && (dd->starved++ >= dd->writes_starved))
			goto dispatch_writes;

		data_dir = READ;//the next batch will be reads

		goto dispatch_find_request;
	}

	/*
	 * there are either no reads or writes have been starved
	 */

	if (writes) {
dispatch_writes://reaching here means there are no reads, or writes are starving and must be served
		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE]));

		dd->starved = 0;//reset starved

		data_dir = WRITE;//the next batch will be writes

		goto dispatch_find_request;
	}

	return 0;

dispatch_find_request://from here, pick the first rq of the new batch
	/*
	 * we are not running a batch, find best request for selected data_dir
	 */
	/*If jiffies has passed the deadline of the first rq in this fifo list, or the
	  previous request ran in the other direction (so no next_rq is cached), take
	  the starving rq at the head of the fifo list. A typical case: so many reads
	  that a write has gone past its deadline.*/
	if (deadline_check_fifo(dd, data_dir) || !dd->next_rq[data_dir]) {
		/*
		 * A deadline has expired, the last request was in the other
		 * direction, or we have run out of higher-sectored requests.
		 * Start again from the request with the earliest expiry time.
		 */
		rq = rq_entry_fifo(dd->fifo_list[data_dir].next);
	} else {//otherwise favor sector continuity and continue from the last dispatched rq
		/*
		 * The last req was the same dir and we have a next request in
		 * sort order. No expired requests so continue on from here.
		 */
		rq = dd->next_rq[data_dir];
	}

	dd->batching = 0;

dispatch_request:
	/*
	 * rq is the selected appropriate request.
	 */
	dd->batching++;
	deadline_move_request(dd, rq);

	return 1;
}
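To see the read preference in action, here is a small, self-contained user-space simulation of just the direction-selection logic above (writes_starved = 2, both fifos assumed non-empty throughout; purely illustrative):

#include <stdio.h>

int main(void)
{
	int starved = 0;
	const int writes_starved = 2;
	int batch;

	/* with both fifos busy, reads win until `starved` passes
	 * writes_starved, then one write batch runs and the counter resets */
	for (batch = 1; batch <= 8; batch++) {
		if (starved++ >= writes_starved) {
			printf("batch %d: WRITE (starvation line reached)\n", batch);
			starved = 0;
		} else {
			printf("batch %d: READ\n", batch);
		}
	}
	return 0;
}

The output alternates two read batches with one write batch, which is exactly the writes_starved = 2 policy described in section 3.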

deadline_check_fifo() checks whether jiffies has passed the deadline of the first request in the given fifo.
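In the same sources it is a one-liner around time_after(), shown here for completeness:

static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
{
	struct request *rq = rq_entry_fifo(dd->fifo_list[ddir].next);

	/*
	 * rq is expired!
	 */
	if (time_after(jiffies, rq_fifo_time(rq)))
		return 1;

	return 0;
}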

deadline_move_request() records, based on the request being dispatched, which request should be dispatched next, and then calls deadline_move_to_dispatch() to do the actual dispatching:

static void
deadline_move_request(struct deadline_data *dd, struct request *rq)
{
	const int data_dir = rq_data_dir(rq);

	/*clear both next_rq slots first*/
	dd->next_rq[READ] = NULL;
	dd->next_rq[WRITE] = NULL;
	//record the next rq in this direction; deadline_latter_request() returns rq's successor in the rb-tree
	dd->next_rq[data_dir] = deadline_latter_request(rq);

	dd->last_sector = rq_end_sector(rq);

	/*
	 * take it off the sort and fifo list, move
	 * to dispatch queue
	 */
	deadline_move_to_dispatch(dd, rq);
}
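deadline_latter_request(), referenced above, is just an rb-tree successor lookup (roughly):

static inline struct request *
deadline_latter_request(struct request *rq)
{
	struct rb_node *node = rb_next(&rq->rb_node);

	if (node)
		return rb_entry_rq(node);

	return NULL;
}

deadline_move_to_dispatch() then finishes the job: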
static inline void
deadline_move_to_dispatch(struct deadline_data *dd, struct request *rq)
{
	struct request_queue *q = rq->q;

	deadline_remove_request(q, rq);//take rq off the fifo_list and sort_list
	elv_dispatch_add_tail(q, rq);//append it to the dispatch queue of the owning request_queue
}
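For completeness, deadline_remove_request() clears the request's fifo linkage and deletes its rb-tree node; roughly:

static void deadline_remove_request(struct request_queue *q, struct request *rq)
{
	struct deadline_data *dd = q->elevator->elevator_data;

	rq_fifo_clear(rq);
	deadline_del_rq_rb(dd, rq);
}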

This concludes our walk through the Deadline Scheduler!
