deadline-iosched.c

/*
 * Deadline i/o scheduler.
 *
 * Copyright (C) 2002 Jens Axboe <[email protected]>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>

/*
 * See Documentation/block/deadline-iosched.txt
 */
static const int read_expire = HZ / 2;  /* max time before a read is submitted. */
static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
static const int writes_starved = 2;    /* max times reads can starve a write */
static const int fifo_batch = 16;       /* # of sequential requests treated as one
                                           by the above parameters. For throughput. */

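/*
 * Note: the four values above are compile-time defaults only; the live,
 * per-queue copies sit in struct deadline_data below and are tunable at
 * runtime through the sysfs attributes defined at the bottom of this
 * file (e.g., assuming a disk named sda,
 * /sys/block/sda/queue/iosched/read_expire).
 */
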
struct deadline_data {
        /*
         * run time data
         */

        /*
         * requests (deadline_rq s) are present on both sort_list and fifo_list
         */
        struct rb_root sort_list[2];
        struct list_head fifo_list[2];

        /*
         * next in sort order. read, write or both are NULL
         */
        struct request *next_rq[2];
        unsigned int batching;          /* number of sequential requests made */
        sector_t last_sector;           /* head position */
        unsigned int starved;           /* times reads have starved writes */

        /*
         * settings that change how the i/o scheduler behaves
         */
        int fifo_expire[2];
        int fifo_batch;
        int writes_starved;
        int front_merges;
};
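
/*
 * The [2] arrays above are indexed by request data direction: READ == 0
 * and WRITE == 1, which is exactly what rq_data_dir() returns, so e.g.
 * dd->fifo_list[rq_data_dir(rq)] is the fifo a given request lives on.
 */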

static void deadline_move_request(struct deadline_data *, struct request *);

static inline struct rb_root *
deadline_rb_root(struct deadline_data *dd, struct request *rq)
{
        return &dd->sort_list[rq_data_dir(rq)];
}

/*
 * get the request after `rq' in sector-sorted order
 */
static inline struct request *
deadline_latter_request(struct request *rq)
{
        struct rb_node *node = rb_next(&rq->rb_node);

        if (node)
                return rb_entry_rq(node);

        return NULL;
}

static void
deadline_add_rq_rb(struct deadline_data *dd, struct request *rq)
{
        struct rb_root *root = deadline_rb_root(dd, rq);

        elv_rb_add(root, rq);
}

static inline void
deadline_del_rq_rb(struct deadline_data *dd, struct request *rq)
{
        const int data_dir = rq_data_dir(rq);

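        /*
         * If rq is the cached next-in-sector-order request for this
         * direction, advance the cache past rq before removing it from
         * the tree, so an in-progress batch can continue from rq's
         * successor.
         */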
        if (dd->next_rq[data_dir] == rq)
                dd->next_rq[data_dir] = deadline_latter_request(rq);

        elv_rb_del(deadline_rb_root(dd, rq), rq);
}

/*
 * add rq to rbtree and fifo
 */
static void
deadline_add_request(struct request_queue *q, struct request *rq)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        const int data_dir = rq_data_dir(rq);

        deadline_add_rq_rb(dd, rq);

        /*
         * set expire time and add to fifo list
         */
        rq_set_fifo_time(rq, jiffies + dd->fifo_expire[data_dir]);
        list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]);
}
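
/*
 * Worked example of the expiry arithmetic above, assuming HZ == 1000:
 * a read queued at jiffies == 10000 gets a fifo time of 10500
 * (read_expire == HZ / 2, i.e. half a second); a write queued at the
 * same moment gets 15000 (write_expire == 5 * HZ).
 */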

/*
 * remove rq from rbtree and fifo.
 */
static void deadline_remove_request(struct request_queue *q, struct request *rq)
{
        struct deadline_data *dd = q->elevator->elevator_data;

        rq_fifo_clear(rq);
        deadline_del_rq_rb(dd, rq);
}

static int
deadline_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct request *__rq;
        int ret;

        /*
         * check for front merge
         */
        if (dd->front_merges) {
                sector_t sector = bio->bi_sector + bio_sectors(bio);

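                /*
                 * A front merge is only possible with a request that starts
                 * exactly where this bio ends: e.g. a bio covering sectors
                 * 100..107 (8 sectors) can only front-merge into a request
                 * beginning at sector 108, hence the exact-position rbtree
                 * lookup below.
                 */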
                __rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
                if (__rq) {
                        BUG_ON(sector != blk_rq_pos(__rq));

                        if (elv_rq_merge_ok(__rq, bio)) {
                                ret = ELEVATOR_FRONT_MERGE;
                                goto out;
                        }
                }
        }

        return ELEVATOR_NO_MERGE;
out:
        *req = __rq;
        return ret;
}

static void deadline_merged_request(struct request_queue *q,
                                    struct request *req, int type)
{
        struct deadline_data *dd = q->elevator->elevator_data;

        /*
         * if the merge was a front merge, we need to reposition request
         */
        if (type == ELEVATOR_FRONT_MERGE) {
                elv_rb_del(deadline_rb_root(dd, req), req);
                deadline_add_rq_rb(dd, req);
        }
}

static void
deadline_merged_requests(struct request_queue *q, struct request *req,
                         struct request *next)
{
        /*
         * if next expires before rq, assign its expire time to rq
         * and move into next position (next will be deleted) in fifo
         */
        if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
                if (time_before(rq_fifo_time(next), rq_fifo_time(req))) {
                        list_move(&req->queuelist, &next->queuelist);
                        rq_set_fifo_time(req, rq_fifo_time(next));
                }
        }

        /*
         * kill knowledge of next, this one is a goner
         */
        deadline_remove_request(q, next);
}
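
/*
 * Example of the fifo hand-off above: if req expires at t == 200 and
 * next at t == 150, req inherits both next's earlier deadline (150) and
 * next's fifo position, so merging never weakens the stricter deadline
 * of the pair.
 */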

/*
 * move request from sort list to dispatch queue.
 */
static inline void
deadline_move_to_dispatch(struct deadline_data *dd, struct request *rq)
{
        struct request_queue *q = rq->q;

        deadline_remove_request(q, rq);
        elv_dispatch_add_tail(q, rq);
}

/*
 * move an entry to dispatch queue
 */
static void
deadline_move_request(struct deadline_data *dd, struct request *rq)
{
        const int data_dir = rq_data_dir(rq);

        dd->next_rq[READ] = NULL;
        dd->next_rq[WRITE] = NULL;
        dd->next_rq[data_dir] = deadline_latter_request(rq);

        dd->last_sector = rq_end_sector(rq);

        /*
         * take it off the sort and fifo list, move
         * to dispatch queue
         */
        deadline_move_to_dispatch(dd, rq);
}

/*
 * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
 * 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir])
 */
static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
{
        struct request *rq = rq_entry_fifo(dd->fifo_list[ddir].next);

        /*
         * rq is expired!
         */
        if (time_after(jiffies, rq_fifo_time(rq)))
                return 1;

        return 0;
}

/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc
 */
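/*
 * In rough pseudocode, the selection order below is:
 *
 *      1. if a batch is in progress and fewer than fifo_batch requests
 *         have been issued, keep going from the cached next_rq;
 *      2. otherwise prefer reads, unless writes have already been passed
 *         over writes_starved times in a row;
 *      3. within the chosen direction, restart from the fifo head when a
 *         deadline has expired or there is no sector-ordered successor,
 *         else continue in sector order;
 *      4. with nothing queued in either direction, report idle.
 */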
static int deadline_dispatch_requests(struct request_queue *q, int force)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        const int reads = !list_empty(&dd->fifo_list[READ]);
        const int writes = !list_empty(&dd->fifo_list[WRITE]);
        struct request *rq;
        int data_dir;

        /*
         * batches are currently reads XOR writes
         */
        if (dd->next_rq[WRITE])
                rq = dd->next_rq[WRITE];
        else
                rq = dd->next_rq[READ];

        if (rq && dd->batching < dd->fifo_batch)
                /* we have a next request and are still entitled to batch */
                goto dispatch_request;

        /*
         * at this point we are not running a batch. select the appropriate
         * data direction (read / write)
         */

        if (reads) {
                BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));

                if (writes && (dd->starved++ >= dd->writes_starved))
                        goto dispatch_writes;

                data_dir = READ;

                goto dispatch_find_request;
        }

        /*
         * there are either no reads or writes have been starved
         */

        if (writes) {
dispatch_writes:
                BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE]));

                dd->starved = 0;

                data_dir = WRITE;

                goto dispatch_find_request;
        }

        return 0;

dispatch_find_request:
        /*
         * we are not running a batch, find best request for selected data_dir
         */
        if (deadline_check_fifo(dd, data_dir) || !dd->next_rq[data_dir]) {
                /*
                 * A deadline has expired, the last request was in the other
                 * direction, or we have run out of higher-sectored requests.
                 * Start again from the request with the earliest expiry time.
                 */
                rq = rq_entry_fifo(dd->fifo_list[data_dir].next);
        } else {
                /*
                 * The last req was the same dir and we have a next request in
                 * sort order. No expired requests so continue on from here.
                 */
                rq = dd->next_rq[data_dir];
        }

        dd->batching = 0;

dispatch_request:
        /*
         * rq is the selected appropriate request.
         */
        dd->batching++;
        deadline_move_request(dd, rq);

        return 1;
}

static void deadline_exit_queue(struct elevator_queue *e)
{
        struct deadline_data *dd = e->elevator_data;

        BUG_ON(!list_empty(&dd->fifo_list[READ]));
        BUG_ON(!list_empty(&dd->fifo_list[WRITE]));

        kfree(dd);
}

/*
 * initialize elevator private data (deadline_data).
 */
static int deadline_init_queue(struct request_queue *q)
{
        struct deadline_data *dd;

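        /* node-local allocation, zeroed via __GFP_ZERO (kzalloc_node equivalent) */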
        dd = kmalloc_node(sizeof(*dd), GFP_KERNEL | __GFP_ZERO, q->node);
        if (!dd)
                return -ENOMEM;

        INIT_LIST_HEAD(&dd->fifo_list[READ]);
        INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
        dd->sort_list[READ] = RB_ROOT;
        dd->sort_list[WRITE] = RB_ROOT;
        dd->fifo_expire[READ] = read_expire;
        dd->fifo_expire[WRITE] = write_expire;
        dd->writes_starved = writes_starved;
        dd->front_merges = 1;
        dd->fifo_batch = fifo_batch;

        q->elevator->elevator_data = dd;
        return 0;
}

/*
 * sysfs parts below
 */

static ssize_t
deadline_var_show(int var, char *page)
{
        return sprintf(page, "%d\n", var);
}

static ssize_t
deadline_var_store(int *var, const char *page, size_t count)
{
        char *p = (char *) page;

        *var = simple_strtol(p, &p, 10);
        return count;
}
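
/*
 * Parsing above is permissive: simple_strtol() stops at the first
 * non-digit, no error is reported back to the writer, and the store
 * always claims to have consumed all of 'count' bytes.
 */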

#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                            \
static ssize_t __FUNC(struct elevator_queue *e, char *page)             \
{                                                                       \
        struct deadline_data *dd = e->elevator_data;                    \
        int __data = __VAR;                                             \
        if (__CONV)                                                     \
                __data = jiffies_to_msecs(__data);                      \
        return deadline_var_show(__data, (page));                       \
}
SHOW_FUNCTION(deadline_read_expire_show, dd->fifo_expire[READ], 1);
SHOW_FUNCTION(deadline_write_expire_show, dd->fifo_expire[WRITE], 1);
SHOW_FUNCTION(deadline_writes_starved_show, dd->writes_starved, 0);
SHOW_FUNCTION(deadline_front_merges_show, dd->front_merges, 0);
SHOW_FUNCTION(deadline_fifo_batch_show, dd->fifo_batch, 0);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
{                                                                       \
        struct deadline_data *dd = e->elevator_data;                    \
        int __data;                                                     \
        int ret = deadline_var_store(&__data, (page), count);           \
        if (__data < (MIN))                                             \
                __data = (MIN);                                         \
        else if (__data > (MAX))                                        \
                __data = (MAX);                                         \
        if (__CONV)                                                     \
                *(__PTR) = msecs_to_jiffies(__data);                    \
        else                                                            \
                *(__PTR) = __data;                                      \
        return ret;                                                     \
}
STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
STORE_FUNCTION(deadline_front_merges_store, &dd->front_merges, 0, 1, 0);
STORE_FUNCTION(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX, 0);
#undef STORE_FUNCTION

#define DD_ATTR(name) \
        __ATTR(name, S_IRUGO|S_IWUSR, deadline_##name##_show, \
                                      deadline_##name##_store)

static struct elv_fs_entry deadline_attrs[] = {
        DD_ATTR(read_expire),
        DD_ATTR(write_expire),
        DD_ATTR(writes_starved),
        DD_ATTR(front_merges),
        DD_ATTR(fifo_batch),
        __ATTR_NULL
};
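
/*
 * Once this scheduler is active on a queue, the attributes above appear
 * under /sys/block/<dev>/queue/iosched/. The two expiry knobs are shown
 * and stored in milliseconds (the __CONV flag), so with the defaults,
 * e.g. on a disk named sda:
 *
 *      cat /sys/block/sda/queue/iosched/write_expire    -> 5000
 *      echo 3000 > /sys/block/sda/queue/iosched/write_expire
 */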

static struct elevator_type iosched_deadline = {
        .ops = {
                .elevator_merge_fn = deadline_merge,
                .elevator_merged_fn = deadline_merged_request,
                .elevator_merge_req_fn = deadline_merged_requests,
                .elevator_dispatch_fn = deadline_dispatch_requests,
                .elevator_add_req_fn = deadline_add_request,
                .elevator_former_req_fn = elv_rb_former_request,
                .elevator_latter_req_fn = elv_rb_latter_request,
                .elevator_init_fn = deadline_init_queue,
                .elevator_exit_fn = deadline_exit_queue,
        },

        .elevator_attrs = deadline_attrs,
        .elevator_name = "deadline",
        .elevator_owner = THIS_MODULE,
};

static int __init deadline_init(void)
{
        return elv_register(&iosched_deadline);
}

static void __exit deadline_exit(void)
{
        elv_unregister(&iosched_deadline);
}
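
/*
 * The scheduler registers under the name "deadline" and can be selected
 * per device at runtime, e.g. on a disk named sda:
 *
 *      echo deadline > /sys/block/sda/queue/scheduler
 *
 * or made the boot-time default with the elevator=deadline kernel
 * parameter.
 */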

module_init(deadline_init);
module_exit(deadline_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("deadline IO scheduler");