Linux Kernel 3.7.1
cfq-iosched.c
1 /*
2  * CFQ, or complete fairness queueing, disk scheduler.
3  *
4  * Based on ideas from a previously unfinished io
5  * scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
6  *
7  * Copyright (C) 2003 Jens Axboe <[email protected]>
8  */
9 #include <linux/module.h>
10 #include <linux/slab.h>
11 #include <linux/blkdev.h>
12 #include <linux/elevator.h>
13 #include <linux/jiffies.h>
14 #include <linux/rbtree.h>
15 #include <linux/ioprio.h>
16 #include <linux/blktrace_api.h>
17 #include "blk.h"
18 #include "blk-cgroup.h"
19 
20 /*
21  * tunables
22  */
23 /* max queue in one round of service */
24 static const int cfq_quantum = 8;
25 static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
26 /* maximum backwards seek, in KiB */
27 static const int cfq_back_max = 16 * 1024;
28 /* penalty of a backwards seek */
29 static const int cfq_back_penalty = 2;
30 static const int cfq_slice_sync = HZ / 10;
31 static int cfq_slice_async = HZ / 25;
32 static const int cfq_slice_async_rq = 2;
33 static int cfq_slice_idle = HZ / 125;
34 static int cfq_group_idle = HZ / 125;
35 static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
36 static const int cfq_hist_divisor = 4;
37 
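For orientation: the defaults above are fixed fractions of HZ (one second worth of jiffies), so up to jiffy rounding they correspond to the following wall-clock values.

/*
 * Wall-clock view of the defaults (independent of the configured HZ):
 *   cfq_fifo_expire    = { HZ/4, HZ/8 } -> { 250 ms, 125 ms }
 *   cfq_slice_sync     = HZ/10          ->  100 ms
 *   cfq_slice_async    = HZ/25          ->   40 ms
 *   cfq_slice_idle     = HZ/125         ->    8 ms
 *   cfq_group_idle     = HZ/125         ->    8 ms
 *   cfq_target_latency = HZ*3/10        ->  300 ms
 */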
38 /*
39  * offset from end of service tree
40  */
41 #define CFQ_IDLE_DELAY (HZ / 5)
42 
43 /*
44  * below this threshold, we consider thinktime immediate
45  */
46 #define CFQ_MIN_TT (2)
47 
48 #define CFQ_SLICE_SCALE (5)
49 #define CFQ_HW_QUEUE_MIN (5)
50 #define CFQ_SERVICE_SHIFT 12
51 
52 #define CFQQ_SEEK_THR (sector_t)(8 * 100)
53 #define CFQQ_CLOSE_THR (sector_t)(8 * 1024)
54 #define CFQQ_SECT_THR_NONROT (sector_t)(2 * 32)
55 #define CFQQ_SEEKY(cfqq) (hweight32(cfqq->seek_history) > 32/8)
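Reading the macro: seek_history acts as a 32-slot bitmap of recent requests (it is maintained elsewhere in this file), and a queue counts as seeky once more than 32/8 = 4 of those slots are set. A hypothetical illustration:

/* Hypothetical values, for illustration only: */
cfqq->seek_history = 0x00000013;	/* 3 of the last 32 requests were seeky */
CFQQ_SEEKY(cfqq);			/* hweight32(0x13) = 3  -> false */
cfqq->seek_history = 0x0000001f;	/* 5 seeky requests */
CFQQ_SEEKY(cfqq);			/* hweight32(0x1f) = 5  -> true  */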
56 
57 #define RQ_CIC(rq) icq_to_cic((rq)->elv.icq)
58 #define RQ_CFQQ(rq) (struct cfq_queue *) ((rq)->elv.priv[0])
59 #define RQ_CFQG(rq) (struct cfq_group *) ((rq)->elv.priv[1])
60 
61 static struct kmem_cache *cfq_pool;
62 
63 #define CFQ_PRIO_LISTS IOPRIO_BE_NR
64 #define cfq_class_idle(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
65 #define cfq_class_rt(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
66 
67 #define sample_valid(samples) ((samples) > 80)
68 #define rb_entry_cfqg(node) rb_entry((node), struct cfq_group, rb_node)
69 
70 struct cfq_ttime {
71  unsigned long last_end_request;
72 
73  unsigned long ttime_total;
74  unsigned long ttime_samples;
75  unsigned long ttime_mean;
76 };
77 
78 /*
79  * Most of our rbtree usage is for sorting with min extraction, so
80  * if we cache the leftmost node we don't have to walk down the tree
81  * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
82  * move this into the elevator for the rq sorting as well.
83  */
84 struct cfq_rb_root {
85  struct rb_root rb;
86  struct rb_node *left;
87  unsigned count;
88  unsigned total_weight;
89  u64 min_vdisktime;
90  struct cfq_ttime ttime;
91 };
92 #define CFQ_RB_ROOT (struct cfq_rb_root) { .rb = RB_ROOT, \
93  .ttime = {.last_end_request = jiffies,},}
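The cached pointer makes "peek the minimum" O(1) in the common case. A minimal sketch of the pattern follows; the file's real helpers are cfq_rb_first() and cfq_rb_erase() further below, and cfq_rb_peek_min() here is only illustrative:

/* Sketch only -- not a function in this file. */
static struct rb_node *cfq_rb_peek_min(struct cfq_rb_root *root)
{
	if (!root->left)			/* cache empty: walk down once */
		root->left = rb_first(&root->rb);
	return root->left;			/* NULL if the tree is empty */
}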
94 
95 /*
96  * Per process-grouping structure
97  */
98 struct cfq_queue {
99  /* reference count */
100  int ref;
101  /* various state flags, see below */
102  unsigned int flags;
103  /* parent cfq_data */
104  struct cfq_data *cfqd;
105  /* service_tree member */
106  struct rb_node rb_node;
107  /* service_tree key */
108  unsigned long rb_key;
109  /* prio tree member */
110  struct rb_node p_node;
111  /* prio tree root we belong to, if any */
112  struct rb_root *p_root;
113  /* sorted list of pending requests */
114  struct rb_root sort_list;
115  /* if fifo isn't expired, next request to serve */
116  struct request *next_rq;
117  /* requests queued in sort_list */
118  int queued[2];
119  /* currently allocated requests */
120  int allocated[2];
121  /* fifo list of requests in sort_list */
122  struct list_head fifo;
123 
124  /* time when queue got scheduled in to dispatch first request. */
125  unsigned long dispatch_start;
126  unsigned int allocated_slice;
127  unsigned int slice_dispatch;
128  /* time when first request from queue completed and slice started. */
129  unsigned long slice_start;
130  unsigned long slice_end;
131  long slice_resid;
132 
133  /* pending priority requests */
134  int prio_pending;
135  /* number of requests that are on the dispatch list or inside driver */
136  int dispatched;
137 
138  /* io prio of this group */
139  unsigned short ioprio, org_ioprio;
140  unsigned short ioprio_class;
141 
142  pid_t pid;
143 
144  u32 seek_history;
145  sector_t last_request_pos;
146 
147  struct cfq_rb_root *service_tree;
148  struct cfq_queue *new_cfqq;
149  struct cfq_group *cfqg;
150  /* Number of sectors dispatched from queue in single dispatch round */
151  unsigned long nr_sectors;
152 };
153 
154 /*
155  * First index in the service_trees.
156  * IDLE is handled separately: it uses service_tree_idle rather than indexing service_trees
157  */
158 enum wl_prio_t {
159  BE_WORKLOAD = 0,
160  RT_WORKLOAD = 1,
161  IDLE_WORKLOAD = 2,
162  CFQ_PRIO_NR,
163 };
164 
165 /*
166  * Second index in the service_trees.
167  */
168 enum wl_type_t {
169  ASYNC_WORKLOAD = 0,
170  SYNC_NOIDLE_WORKLOAD = 1,
171  SYNC_WORKLOAD = 2
172 };
173 
174 struct cfqg_stats {
175 #ifdef CONFIG_CFQ_GROUP_IOSCHED
176  /* total bytes transferred */
177  struct blkg_rwstat service_bytes;
178  /* total IOs serviced, post merge */
179  struct blkg_rwstat serviced;
180  /* number of ios merged */
181  struct blkg_rwstat merged;
182  /* total time spent on device in ns, may not be accurate w/ queueing */
183  struct blkg_rwstat service_time;
184  /* total time spent waiting in scheduler queue in ns */
185  struct blkg_rwstat wait_time;
186  /* number of IOs queued up */
187  struct blkg_rwstat queued;
188  /* total sectors transferred */
189  struct blkg_stat sectors;
190  /* total disk time and nr sectors dispatched by this group */
191  struct blkg_stat time;
192 #ifdef CONFIG_DEBUG_BLK_CGROUP
193  /* time not charged to this cgroup */
194  struct blkg_stat unaccounted_time;
195  /* sum of number of ios queued across all samples */
196  struct blkg_stat avg_queue_size_sum;
197  /* count of samples taken for average */
198  struct blkg_stat avg_queue_size_samples;
199  /* how many times this group has been removed from service tree */
200  struct blkg_stat dequeue;
201  /* total time spent waiting for it to be assigned a timeslice. */
202  struct blkg_stat group_wait_time;
203  /* time spent idling for this blkcg_gq */
204  struct blkg_stat idle_time;
205  /* total time with empty current active q with other requests queued */
206  struct blkg_stat empty_time;
207  /* fields after this shouldn't be cleared on stat reset */
208  uint64_t start_group_wait_time;
209  uint64_t start_idle_time;
210  uint64_t start_empty_time;
211  uint16_t flags;
212 #endif /* CONFIG_DEBUG_BLK_CGROUP */
213 #endif /* CONFIG_CFQ_GROUP_IOSCHED */
214 };
215 
216 /* This is per cgroup per device grouping structure */
217 struct cfq_group {
218  /* must be the first member */
219  struct blkg_policy_data pd;
220 
221  /* group service_tree member */
222  struct rb_node rb_node;
223 
224  /* group service_tree key */
225  u64 vdisktime;
226  unsigned int weight;
227  unsigned int new_weight;
228  unsigned int dev_weight;
229 
230  /* number of cfqq currently on this group */
231  int nr_cfqq;
232 
233  /*
234  * Per group busy queues average. Useful for workload slice calc. We
235  * create the array for each prio class but at run time it is used
236  * only for RT and BE class and slot for IDLE class remains unused.
237  * This is primarily done to avoid confusion and a gcc warning.
238  */
239  unsigned int busy_queues_avg[CFQ_PRIO_NR];
240  /*
241  * rr lists of queues with requests. We maintain service trees for
242  * RT and BE classes. These trees are subdivided in subclasses
243  * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE
244  * class there is no subclassification and all the cfq queues go on
245  * a single tree service_tree_idle.
246  * Counts are embedded in the cfq_rb_root
247  */
248  struct cfq_rb_root service_trees[2][3];
249  struct cfq_rb_root service_tree_idle;
250 
251  unsigned long saved_workload_slice;
252  enum wl_type_t saved_workload;
253  enum wl_prio_t saved_serving_prio;
254 
255  /* number of requests that are on the dispatch list or inside driver */
256  int dispatched;
257  struct cfq_ttime ttime;
258  struct cfqg_stats stats;
259 };
260 
261 struct cfq_io_cq {
262  struct io_cq icq; /* must be the first member */
263  struct cfq_queue *cfqq[2];
264  struct cfq_ttime ttime;
265  int ioprio; /* the current ioprio */
266 #ifdef CONFIG_CFQ_GROUP_IOSCHED
267  uint64_t blkcg_id; /* the current blkcg ID */
268 #endif
269 };
270 
271 /*
272  * Per block device queue structure
273  */
274 struct cfq_data {
275  struct request_queue *queue;
276  /* Root service tree for cfq_groups */
277  struct cfq_rb_root grp_service_tree;
278  struct cfq_group *root_group;
279 
280  /*
281  * The priority currently being served
282  */
283  enum wl_prio_t serving_prio;
284  enum wl_type_t serving_type;
285  unsigned long workload_expires;
286  struct cfq_group *serving_group;
287 
288  /*
289  * Each priority tree is sorted by next_request position. These
290  * trees are used when determining if two or more queues are
291  * interleaving requests (see cfq_close_cooperator).
292  */
293  struct rb_root prio_trees[CFQ_PRIO_LISTS];
294 
295  unsigned int busy_queues;
296  unsigned int busy_sync_queues;
297 
298  int rq_in_driver;
299  int rq_in_flight[2];
300 
301  /*
302  * queue-depth detection
303  */
304  int rq_queued;
305  int hw_tag;
306  /*
307  * hw_tag can be
308  * -1 => indeterminate (cfq will behave as if NCQ is present, to allow better detection)
309  * 1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
310  * 0 => no NCQ
311  */
312  int hw_tag_est_depth;
313  unsigned int hw_tag_samples;
314 
315  /*
316  * idle window management
317  */
318  struct timer_list idle_slice_timer;
319  struct work_struct unplug_work;
320 
321  struct cfq_queue *active_queue;
322  struct cfq_io_cq *active_cic;
323 
324  /*
325  * async queue for each priority case
326  */
327  struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
328  struct cfq_queue *async_idle_cfqq;
329 
330  sector_t last_position;
331 
332  /*
333  * tunables, see top of file
334  */
335  unsigned int cfq_quantum;
336  unsigned int cfq_fifo_expire[2];
337  unsigned int cfq_back_penalty;
338  unsigned int cfq_back_max;
339  unsigned int cfq_slice[2];
340  unsigned int cfq_slice_async_rq;
341  unsigned int cfq_slice_idle;
342  unsigned int cfq_group_idle;
343  unsigned int cfq_latency;
344  unsigned int cfq_target_latency;
345 
346  /*
347  * Fallback dummy cfqq for extreme OOM conditions
348  */
349  struct cfq_queue oom_cfqq;
350 
351  unsigned long last_delayed_sync;
352 };
353 
354 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
355 
356 static struct cfq_rb_root *service_tree_for(struct cfq_group *cfqg,
357  enum wl_prio_t prio,
358  enum wl_type_t type)
359 {
360  if (!cfqg)
361  return NULL;
362 
363  if (prio == IDLE_WORKLOAD)
364  return &cfqg->service_tree_idle;
365 
366  return &cfqg->service_trees[prio][type];
367 }
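So each group owns one tree per (priority class, workload type) pair plus a single idle tree. For example, a best-effort queue that is synchronous and still allowed to idle would be looked up as follows (cfqq here is hypothetical):

struct cfq_rb_root *st;

st = service_tree_for(cfqq->cfqg, BE_WORKLOAD, SYNC_WORKLOAD);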
368 
369 enum cfqq_state_flags {
370  CFQ_CFQQ_FLAG_on_rr = 0, /* on round-robin busy list */
371  CFQ_CFQQ_FLAG_wait_request, /* waiting for a request */
372  CFQ_CFQQ_FLAG_must_dispatch, /* must be allowed a dispatch */
373  CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */
374  CFQ_CFQQ_FLAG_fifo_expire, /* FIFO checked in this slice */
375  CFQ_CFQQ_FLAG_idle_window, /* slice idling enabled */
376  CFQ_CFQQ_FLAG_prio_changed, /* task priority has changed */
377  CFQ_CFQQ_FLAG_slice_new, /* no requests dispatched in slice */
378  CFQ_CFQQ_FLAG_sync, /* synchronous queue */
379  CFQ_CFQQ_FLAG_coop, /* cfqq is shared */
380  CFQ_CFQQ_FLAG_split_coop, /* shared cfqq will be split */
381  CFQ_CFQQ_FLAG_deep, /* sync cfqq experienced large depth */
382  CFQ_CFQQ_FLAG_wait_busy, /* Waiting for next request */
383 };
384 
385 #define CFQ_CFQQ_FNS(name) \
386 static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq) \
387 { \
388  (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name); \
389 } \
390 static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq) \
391 { \
392  (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name); \
393 } \
394 static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq) \
395 { \
396  return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0; \
397 }
398 
399 CFQ_CFQQ_FNS(on_rr);
400 CFQ_CFQQ_FNS(wait_request);
401 CFQ_CFQQ_FNS(must_dispatch);
402 CFQ_CFQQ_FNS(must_alloc_slice);
403 CFQ_CFQQ_FNS(fifo_expire);
404 CFQ_CFQQ_FNS(idle_window);
405 CFQ_CFQQ_FNS(prio_changed);
406 CFQ_CFQQ_FNS(slice_new);
407 CFQ_CFQQ_FNS(sync);
408 CFQ_CFQQ_FNS(coop);
409 CFQ_CFQQ_FNS(split_coop);
410 CFQ_CFQQ_FNS(deep);
411 CFQ_CFQQ_FNS(wait_busy);
412 #undef CFQ_CFQQ_FNS
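For each flag the macro generates a mark/clear/test triple, e.g. CFQ_CFQQ_FNS(on_rr) yields cfq_mark_cfqq_on_rr(), cfq_clear_cfqq_on_rr() and cfq_cfqq_on_rr(). They are used later in this file along these lines:

/* Typical use of the generated helpers (compare cfq_add_cfqq_rr() below): */
if (!cfq_cfqq_on_rr(cfqq)) {
	cfq_mark_cfqq_on_rr(cfqq);
	cfqd->busy_queues++;
}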
413 
414 static inline struct cfq_group *pd_to_cfqg(struct blkg_policy_data *pd)
415 {
416  return pd ? container_of(pd, struct cfq_group, pd) : NULL;
417 }
418 
419 static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg)
420 {
421  return pd_to_blkg(&cfqg->pd);
422 }
423 
424 #if defined(CONFIG_CFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
425 
426 /* cfqg stats flags */
427 enum cfqg_stats_flags {
428  CFQG_stats_waiting = 0,
429  CFQG_stats_idling,
430  CFQG_stats_empty,
431 };
432 
433 #define CFQG_FLAG_FNS(name) \
434 static inline void cfqg_stats_mark_##name(struct cfqg_stats *stats) \
435 { \
436  stats->flags |= (1 << CFQG_stats_##name); \
437 } \
438 static inline void cfqg_stats_clear_##name(struct cfqg_stats *stats) \
439 { \
440  stats->flags &= ~(1 << CFQG_stats_##name); \
441 } \
442 static inline int cfqg_stats_##name(struct cfqg_stats *stats) \
443 { \
444  return (stats->flags & (1 << CFQG_stats_##name)) != 0; \
445 } \
446 
447 CFQG_FLAG_FNS(waiting)
448 CFQG_FLAG_FNS(idling)
449 CFQG_FLAG_FNS(empty)
450 #undef CFQG_FLAG_FNS
451 
452 /* This should be called with the queue_lock held. */
453 static void cfqg_stats_update_group_wait_time(struct cfqg_stats *stats)
454 {
455  unsigned long long now;
456 
457  if (!cfqg_stats_waiting(stats))
458  return;
459 
460  now = sched_clock();
461  if (time_after64(now, stats->start_group_wait_time))
462  blkg_stat_add(&stats->group_wait_time,
463  now - stats->start_group_wait_time);
464  cfqg_stats_clear_waiting(stats);
465 }
466 
467 /* This should be called with the queue_lock held. */
468 static void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg,
469  struct cfq_group *curr_cfqg)
470 {
471  struct cfqg_stats *stats = &cfqg->stats;
472 
473  if (cfqg_stats_waiting(stats))
474  return;
475  if (cfqg == curr_cfqg)
476  return;
477  stats->start_group_wait_time = sched_clock();
478  cfqg_stats_mark_waiting(stats);
479 }
480 
481 /* This should be called with the queue_lock held. */
482 static void cfqg_stats_end_empty_time(struct cfqg_stats *stats)
483 {
484  unsigned long long now;
485 
486  if (!cfqg_stats_empty(stats))
487  return;
488 
489  now = sched_clock();
490  if (time_after64(now, stats->start_empty_time))
491  blkg_stat_add(&stats->empty_time,
492  now - stats->start_empty_time);
493  cfqg_stats_clear_empty(stats);
494 }
495 
496 static void cfqg_stats_update_dequeue(struct cfq_group *cfqg)
497 {
498  blkg_stat_add(&cfqg->stats.dequeue, 1);
499 }
500 
501 static void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg)
502 {
503  struct cfqg_stats *stats = &cfqg->stats;
504 
505  if (blkg_rwstat_sum(&stats->queued))
506  return;
507 
508  /*
509  * group is already marked empty. This can happen if cfqq got new
510  * request in parent group and moved to this group while being added
511  * to service tree. Just ignore the event and move on.
512  */
513  if (cfqg_stats_empty(stats))
514  return;
515 
516  stats->start_empty_time = sched_clock();
517  cfqg_stats_mark_empty(stats);
518 }
519 
520 static void cfqg_stats_update_idle_time(struct cfq_group *cfqg)
521 {
522  struct cfqg_stats *stats = &cfqg->stats;
523 
524  if (cfqg_stats_idling(stats)) {
525  unsigned long long now = sched_clock();
526 
527  if (time_after64(now, stats->start_idle_time))
528  blkg_stat_add(&stats->idle_time,
529  now - stats->start_idle_time);
530  cfqg_stats_clear_idling(stats);
531  }
532 }
533 
534 static void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg)
535 {
536  struct cfqg_stats *stats = &cfqg->stats;
537 
538  BUG_ON(cfqg_stats_idling(stats));
539 
540  stats->start_idle_time = sched_clock();
541  cfqg_stats_mark_idling(stats);
542 }
543 
544 static void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg)
545 {
546  struct cfqg_stats *stats = &cfqg->stats;
547 
548  blkg_stat_add(&stats->avg_queue_size_sum,
549  blkg_rwstat_sum(&stats->queued));
550  blkg_stat_add(&stats->avg_queue_size_samples, 1);
551  cfqg_stats_update_group_wait_time(stats);
552 }
553 
554 #else /* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
555 
556 static inline void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg, struct cfq_group *curr_cfqg) { }
557 static inline void cfqg_stats_end_empty_time(struct cfqg_stats *stats) { }
558 static inline void cfqg_stats_update_dequeue(struct cfq_group *cfqg) { }
559 static inline void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg) { }
560 static inline void cfqg_stats_update_idle_time(struct cfq_group *cfqg) { }
561 static inline void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg) { }
562 static inline void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { }
563 
564 #endif /* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
565 
566 #ifdef CONFIG_CFQ_GROUP_IOSCHED
567 
568 static struct blkcg_policy blkcg_policy_cfq;
569 
570 static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
571 {
572  return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
573 }
574 
575 static inline void cfqg_get(struct cfq_group *cfqg)
576 {
577  return blkg_get(cfqg_to_blkg(cfqg));
578 }
579 
580 static inline void cfqg_put(struct cfq_group *cfqg)
581 {
582  return blkg_put(cfqg_to_blkg(cfqg));
583 }
584 
585 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...) do { \
586  char __pbuf[128]; \
587  \
588  blkg_path(cfqg_to_blkg((cfqq)->cfqg), __pbuf, sizeof(__pbuf)); \
589  blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
590  cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
591  __pbuf, ##args); \
592 } while (0)
593 
594 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do { \
595  char __pbuf[128]; \
596  \
597  blkg_path(cfqg_to_blkg(cfqg), __pbuf, sizeof(__pbuf)); \
598  blk_add_trace_msg((cfqd)->queue, "%s " fmt, __pbuf, ##args); \
599 } while (0)
600 
601 static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
602  struct cfq_group *curr_cfqg, int rw)
603 {
604  blkg_rwstat_add(&cfqg->stats.queued, rw, 1);
605  cfqg_stats_end_empty_time(&cfqg->stats);
606  cfqg_stats_set_start_group_wait_time(cfqg, curr_cfqg);
607 }
608 
609 static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
610  unsigned long time, unsigned long unaccounted_time)
611 {
612  blkg_stat_add(&cfqg->stats.time, time);
613 #ifdef CONFIG_DEBUG_BLK_CGROUP
614  blkg_stat_add(&cfqg->stats.unaccounted_time, unaccounted_time);
615 #endif
616 }
617 
618 static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw)
619 {
620  blkg_rwstat_add(&cfqg->stats.queued, rw, -1);
621 }
622 
623 static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw)
624 {
625  blkg_rwstat_add(&cfqg->stats.merged, rw, 1);
626 }
627 
628 static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
629  uint64_t bytes, int rw)
630 {
631  blkg_stat_add(&cfqg->stats.sectors, bytes >> 9);
632  blkg_rwstat_add(&cfqg->stats.serviced, rw, 1);
633  blkg_rwstat_add(&cfqg->stats.service_bytes, rw, bytes);
634 }
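The shift converts bytes to 512-byte sectors, so one completed 4 KiB dispatch adds 4096 >> 9 = 8 to the sectors stat, 1 to serviced and 4096 to service_bytes for its rw direction:

/* Example: cfqg_stats_update_dispatch(cfqg, 4096, rq->cmd_flags) => sectors += 8 */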
635 
636 static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
637  uint64_t start_time, uint64_t io_start_time, int rw)
638 {
639  struct cfqg_stats *stats = &cfqg->stats;
640  unsigned long long now = sched_clock();
641 
642  if (time_after64(now, io_start_time))
643  blkg_rwstat_add(&stats->service_time, rw, now - io_start_time);
644  if (time_after64(io_start_time, start_time))
645  blkg_rwstat_add(&stats->wait_time, rw,
646  io_start_time - start_time);
647 }
648 
649 static void cfq_pd_reset_stats(struct blkcg_gq *blkg)
650 {
651  struct cfq_group *cfqg = blkg_to_cfqg(blkg);
652  struct cfqg_stats *stats = &cfqg->stats;
653 
654  /* queued stats shouldn't be cleared */
655  blkg_rwstat_reset(&stats->service_bytes);
656  blkg_rwstat_reset(&stats->serviced);
657  blkg_rwstat_reset(&stats->merged);
658  blkg_rwstat_reset(&stats->service_time);
659  blkg_rwstat_reset(&stats->wait_time);
660  blkg_stat_reset(&stats->time);
661 #ifdef CONFIG_DEBUG_BLK_CGROUP
662  blkg_stat_reset(&stats->unaccounted_time);
663  blkg_stat_reset(&stats->avg_queue_size_sum);
664  blkg_stat_reset(&stats->avg_queue_size_samples);
665  blkg_stat_reset(&stats->dequeue);
666  blkg_stat_reset(&stats->group_wait_time);
667  blkg_stat_reset(&stats->idle_time);
668  blkg_stat_reset(&stats->empty_time);
669 #endif
670 }
671 
672 #else /* CONFIG_CFQ_GROUP_IOSCHED */
673 
674 static inline void cfqg_get(struct cfq_group *cfqg) { }
675 static inline void cfqg_put(struct cfq_group *cfqg) { }
676 
677 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
678  blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
679 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do {} while (0)
680 
681 static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
682  struct cfq_group *curr_cfqg, int rw) { }
683 static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
684  unsigned long time, unsigned long unaccounted_time) { }
685 static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw) { }
686 static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw) { }
687 static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
688  uint64_t bytes, int rw) { }
689 static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
690  uint64_t start_time, uint64_t io_start_time, int rw) { }
691 
692 #endif /* CONFIG_CFQ_GROUP_IOSCHED */
693 
694 #define cfq_log(cfqd, fmt, args...) \
695  blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
696 
697 /* Traverses through cfq group service trees */
698 #define for_each_cfqg_st(cfqg, i, j, st) \
699  for (i = 0; i <= IDLE_WORKLOAD; i++) \
700  for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
701  : &cfqg->service_tree_idle; \
702  (i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
703  (i == IDLE_WORKLOAD && j == 0); \
704  j++, st = i < IDLE_WORKLOAD ? \
705  &cfqg->service_trees[i][j]: NULL) \
706 
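The nested macro visits every service tree a group owns: the [BE, RT] x [ASYNC, SYNC_NOIDLE, SYNC] grid plus the single idle tree. cfq_init_cfqg_base() further down uses it exactly this way:

/* Illustration (mirrors cfq_init_cfqg_base() below): reset all trees of a group. */
struct cfq_rb_root *st;
int i, j;

for_each_cfqg_st(cfqg, i, j, st)
	*st = CFQ_RB_ROOT;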
707 static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd,
708  struct cfq_ttime *ttime, bool group_idle)
709 {
710  unsigned long slice;
711  if (!sample_valid(ttime->ttime_samples))
712  return false;
713  if (group_idle)
714  slice = cfqd->cfq_group_idle;
715  else
716  slice = cfqd->cfq_slice_idle;
717  return ttime->ttime_mean > slice;
718 }
719 
720 static inline bool iops_mode(struct cfq_data *cfqd)
721 {
722  /*
723  * If we are not idling on queues and the drive supports NCQ, requests
724  * execute in parallel and measuring time is not meaningful in most
725  * cases unless we drive shallow queue depths, which itself becomes a
726  * performance bottleneck. In such cases switch to providing fairness
727  * in terms of number of IOs.
728  */
729  if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
730  return true;
731  else
732  return false;
733 }
734 
735 static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
736 {
737  if (cfq_class_idle(cfqq))
738  return IDLE_WORKLOAD;
739  if (cfq_class_rt(cfqq))
740  return RT_WORKLOAD;
741  return BE_WORKLOAD;
742 }
743 
744 
745 static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
746 {
747  if (!cfq_cfqq_sync(cfqq))
748  return ASYNC_WORKLOAD;
749  if (!cfq_cfqq_idle_window(cfqq))
750  return SYNC_NOIDLE_WORKLOAD;
751  return SYNC_WORKLOAD;
752 }
753 
754 static inline int cfq_group_busy_queues_wl(enum wl_prio_t wl,
755  struct cfq_data *cfqd,
756  struct cfq_group *cfqg)
757 {
758  if (wl == IDLE_WORKLOAD)
759  return cfqg->service_tree_idle.count;
760 
761  return cfqg->service_trees[wl][ASYNC_WORKLOAD].count
762  + cfqg->service_trees[wl][SYNC_NOIDLE_WORKLOAD].count
763  + cfqg->service_trees[wl][SYNC_WORKLOAD].count;
764 }
765 
766 static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
767  struct cfq_group *cfqg)
768 {
769  return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count
770  + cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
771 }
772 
773 static void cfq_dispatch_insert(struct request_queue *, struct request *);
774 static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, bool is_sync,
775  struct cfq_io_cq *cic, struct bio *bio,
776  gfp_t gfp_mask);
777 
778 static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
779 {
780  /* cic->icq is the first member, %NULL will convert to %NULL */
781  return container_of(icq, struct cfq_io_cq, icq);
782 }
783 
784 static inline struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *cfqd,
785  struct io_context *ioc)
786 {
787  if (ioc)
788  return icq_to_cic(ioc_lookup_icq(ioc, cfqd->queue));
789  return NULL;
790 }
791 
792 static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_cq *cic, bool is_sync)
793 {
794  return cic->cfqq[is_sync];
795 }
796 
797 static inline void cic_set_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq,
798  bool is_sync)
799 {
800  cic->cfqq[is_sync] = cfqq;
801 }
802 
803 static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic)
804 {
805  return cic->icq.q->elevator->elevator_data;
806 }
807 
808 /*
809  * We regard a request as SYNC, if it's either a read or has the SYNC bit
810  * set (in which case it could also be direct WRITE).
811  */
812 static inline bool cfq_bio_sync(struct bio *bio)
813 {
814  return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
815 }
816 
817 /*
818  * scheduler run of queue, if there are requests pending and no one in the
819  * driver that will restart queueing
820  */
821 static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
822 {
823  if (cfqd->busy_queues) {
824  cfq_log(cfqd, "schedule dispatch");
825  kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
826  }
827 }
828 
829 /*
830  * Scale schedule slice based on io priority. Use the sync time slice only
831  * if a queue is marked sync and has sync io queued. A sync queue with async
832  * io only, should not get full sync slice length.
833  */
834 static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
835  unsigned short prio)
836 {
837  const int base_slice = cfqd->cfq_slice[sync];
838 
839  WARN_ON(prio >= IOPRIO_BE_NR);
840 
841  return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
842 }
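Worked example, assuming HZ = 1000 so that the sync base slice cfq_slice[1] is 100 ms (the shape is the same for any HZ):

/*
 * base_slice = 100, CFQ_SLICE_SCALE = 5, so each priority step is worth 20:
 *   prio 4 (default): 100 + 20 * (4 - 4) = 100 ms
 *   prio 0 (highest): 100 + 20 * (4 - 0) = 180 ms
 *   prio 7 (lowest) : 100 + 20 * (4 - 7) =  40 ms
 */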
843 
844 static inline int
845 cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
846 {
847  return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
848 }
849 
850 static inline u64 cfq_scale_slice(unsigned long delta, struct cfq_group *cfqg)
851 {
852  u64 d = delta << CFQ_SERVICE_SHIFT;
853 
854  d = d * CFQ_WEIGHT_DEFAULT;
855  do_div(d, cfqg->weight);
856  return d;
857 }
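The charge is scaled inversely with the group's weight relative to CFQ_WEIGHT_DEFAULT (500, from blk-cgroup.h), so heavier groups accumulate vdisktime more slowly and stay ahead in the group service tree:

/*
 * Example (CFQ_SERVICE_SHIFT = 12, CFQ_WEIGHT_DEFAULT = 500):
 *   weight  500: vdisktime grows by (delta << 12)        (reference rate)
 *   weight 1000: vdisktime grows by (delta << 12) / 2    (gets ~2x the service)
 *   weight  250: vdisktime grows by (delta << 12) * 2    (gets ~half the service)
 */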
858 
859 static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
860 {
861  s64 delta = (s64)(vdisktime - min_vdisktime);
862  if (delta > 0)
863  min_vdisktime = vdisktime;
864 
865  return min_vdisktime;
866 }
867 
868 static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
869 {
870  s64 delta = (s64)(vdisktime - min_vdisktime);
871  if (delta < 0)
872  min_vdisktime = vdisktime;
873 
874  return min_vdisktime;
875 }
876 
877 static void update_min_vdisktime(struct cfq_rb_root *st)
878 {
879  struct cfq_group *cfqg;
880 
881  if (st->left) {
882  cfqg = rb_entry_cfqg(st->left);
883  st->min_vdisktime = max_vdisktime(st->min_vdisktime,
884  cfqg->vdisktime);
885  }
886 }
887 
888 /*
889  * get averaged number of queues of RT/BE priority.
890  * the average is updated with a formula that gives more weight to higher numbers,
891  * so it follows sudden increases quickly and decays slowly
892  */
893 
894 static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
895  struct cfq_group *cfqg, bool rt)
896 {
897  unsigned min_q, max_q;
898  unsigned mult = cfq_hist_divisor - 1;
899  unsigned round = cfq_hist_divisor / 2;
900  unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);
901 
902  min_q = min(cfqg->busy_queues_avg[rt], busy);
903  max_q = max(cfqg->busy_queues_avg[rt], busy);
904  cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
905  cfq_hist_divisor;
906  return cfqg->busy_queues_avg[rt];
907 }
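With cfq_hist_divisor = 4 this is a weighted average biased toward the larger of the previous average and the current busy count, so it jumps up quickly and drifts down slowly. In integer arithmetic:

/*
 * avg = (3 * max(avg, busy) + min(avg, busy) + 2) / 4
 *   avg = 1, busy rises to 5:  (3*5 + 1 + 2) / 4 = 4   (catches up in one sample)
 *   avg = 4, busy back to 1 :  (3*4 + 1 + 2) / 4 = 3   (decays one step at a time)
 */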
908 
909 static inline unsigned
910 cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
911 {
912  struct cfq_rb_root *st = &cfqd->grp_service_tree;
913 
914  return cfqd->cfq_target_latency * cfqg->weight / st->total_weight;
915 }
916 
917 static inline unsigned
918 cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
919 {
920  unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
921  if (cfqd->cfq_latency) {
922  /*
923  * interested queues (we consider only the ones with the same
924  * priority class in the cfq group)
925  */
926  unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
927  cfq_class_rt(cfqq));
928  unsigned sync_slice = cfqd->cfq_slice[1];
929  unsigned expect_latency = sync_slice * iq;
930  unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg);
931 
932  if (expect_latency > group_slice) {
933  unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
934  /* scale low_slice according to IO priority
935  * and sync vs async */
936  unsigned low_slice =
937  min(slice, base_low_slice * slice / sync_slice);
938  /* the adapted slice value is scaled to fit all iqs
939  * into the target latency */
940  slice = max(slice * group_slice / expect_latency,
941  low_slice);
942  }
943  }
944  return slice;
945 }
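Worked example, assuming HZ = 1000 and a single group at full weight (so group_slice is the whole 300 ms cfq_target_latency):

/*
 *   iq = 4 busy sync BE queues, sync_slice = 100 ms
 *   expect_latency = 4 * 100 = 400 ms  > group_slice = 300 ms
 *   low_slice = min(100, 2*8 * 100 / 100) = 16 ms
 *   slice     = max(100 * 300 / 400, 16) = 75 ms per queue
 */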
946 
947 static inline void
948 cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
949 {
950  unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq);
951 
952  cfqq->slice_start = jiffies;
953  cfqq->slice_end = jiffies + slice;
954  cfqq->allocated_slice = slice;
955  cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
956 }
957 
958 /*
959  * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
960  * isn't valid until the first request from the dispatch is activated
961  * and the slice time set.
962  */
963 static inline bool cfq_slice_used(struct cfq_queue *cfqq)
964 {
965  if (cfq_cfqq_slice_new(cfqq))
966  return false;
967  if (time_before(jiffies, cfqq->slice_end))
968  return false;
969 
970  return true;
971 }
972 
973 /*
974  * Lifted from AS - choose which of rq1 and rq2 is best served now.
975  * We choose the request that is closest to the head right now. Distance
976  * behind the head is penalized and only allowed to a certain extent.
977  */
978 static struct request *
979 cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
980 {
981  sector_t s1, s2, d1 = 0, d2 = 0;
982  unsigned long back_max;
983 #define CFQ_RQ1_WRAP 0x01 /* request 1 wraps */
984 #define CFQ_RQ2_WRAP 0x02 /* request 2 wraps */
985  unsigned wrap = 0; /* bit mask: requests behind the disk head? */
986 
987  if (rq1 == NULL || rq1 == rq2)
988  return rq2;
989  if (rq2 == NULL)
990  return rq1;
991 
992  if (rq_is_sync(rq1) != rq_is_sync(rq2))
993  return rq_is_sync(rq1) ? rq1 : rq2;
994 
995  if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_PRIO)
996  return rq1->cmd_flags & REQ_PRIO ? rq1 : rq2;
997 
998  s1 = blk_rq_pos(rq1);
999  s2 = blk_rq_pos(rq2);
1000 
1001  /*
1002  * by definition, 1KiB is 2 sectors
1003  */
1004  back_max = cfqd->cfq_back_max * 2;
1005 
1006  /*
1007  * Strict one way elevator _except_ in the case where we allow
1008  * short backward seeks which are biased as twice the cost of a
1009  * similar forward seek.
1010  */
1011  if (s1 >= last)
1012  d1 = s1 - last;
1013  else if (s1 + back_max >= last)
1014  d1 = (last - s1) * cfqd->cfq_back_penalty;
1015  else
1016  wrap |= CFQ_RQ1_WRAP;
1017 
1018  if (s2 >= last)
1019  d2 = s2 - last;
1020  else if (s2 + back_max >= last)
1021  d2 = (last - s2) * cfqd->cfq_back_penalty;
1022  else
1023  wrap |= CFQ_RQ2_WRAP;
1024 
1025  /* Found required data */
1026 
1027  /*
1028  * By doing switch() on the bit mask "wrap" we avoid having to
1029  * check two variables for all permutations: --> faster!
1030  */
1031  switch (wrap) {
1032  case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
1033  if (d1 < d2)
1034  return rq1;
1035  else if (d2 < d1)
1036  return rq2;
1037  else {
1038  if (s1 >= s2)
1039  return rq1;
1040  else
1041  return rq2;
1042  }
1043 
1044  case CFQ_RQ2_WRAP:
1045  return rq1;
1046  case CFQ_RQ1_WRAP:
1047  return rq2;
1048  case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
1049  default:
1050  /*
1051  * Since both rqs are wrapped,
1052  * start with the one that's further behind head
1053  * (--> only *one* back seek required),
1054  * since back seek takes more time than forward.
1055  */
1056  if (s1 <= s2)
1057  return rq1;
1058  else
1059  return rq2;
1060  }
1061 }
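Worked example with the default tunables (back_max = cfq_back_max * 2 = 32768 sectors, cfq_back_penalty = 2), assuming both requests are sync and neither has REQ_PRIO:

/*
 * Head (last) at sector 10000:
 *   rq1 at 10100 (forward)          : d1 = 100
 *   rq2 at  9980 (20 sectors behind): d2 = (10000 - 9980) * 2 = 40  -> rq2 wins
 *   a request more than 32768 sectors behind "wraps" and loses to any
 *   request that does not wrap
 */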
1062 
1063 /*
1064  * Below are the leftmost-node cache helpers for the rbtree
1065  */
1066 static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
1067 {
1068  /* Service tree is empty */
1069  if (!root->count)
1070  return NULL;
1071 
1072  if (!root->left)
1073  root->left = rb_first(&root->rb);
1074 
1075  if (root->left)
1076  return rb_entry(root->left, struct cfq_queue, rb_node);
1077 
1078  return NULL;
1079 }
1080 
1081 static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
1082 {
1083  if (!root->left)
1084  root->left = rb_first(&root->rb);
1085 
1086  if (root->left)
1087  return rb_entry_cfqg(root->left);
1088 
1089  return NULL;
1090 }
1091 
1092 static void rb_erase_init(struct rb_node *n, struct rb_root *root)
1093 {
1094  rb_erase(n, root);
1095  RB_CLEAR_NODE(n);
1096 }
1097 
1098 static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
1099 {
1100  if (root->left == n)
1101  root->left = NULL;
1102  rb_erase_init(n, &root->rb);
1103  --root->count;
1104 }
1105 
1106 /*
1107  * would be nice to take fifo expire time into account as well
1108  */
1109 static struct request *
1110 cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1111  struct request *last)
1112 {
1113  struct rb_node *rbnext = rb_next(&last->rb_node);
1114  struct rb_node *rbprev = rb_prev(&last->rb_node);
1115  struct request *next = NULL, *prev = NULL;
1116 
1117  BUG_ON(RB_EMPTY_NODE(&last->rb_node));
1118 
1119  if (rbprev)
1120  prev = rb_entry_rq(rbprev);
1121 
1122  if (rbnext)
1123  next = rb_entry_rq(rbnext);
1124  else {
1125  rbnext = rb_first(&cfqq->sort_list);
1126  if (rbnext && rbnext != &last->rb_node)
1127  next = rb_entry_rq(rbnext);
1128  }
1129 
1130  return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
1131 }
1132 
1133 static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
1134  struct cfq_queue *cfqq)
1135 {
1136  /*
1137  * just an approximation, should be ok.
1138  */
1139  return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
1140  cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
1141 }
1142 
1143 static inline s64
1144 cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
1145 {
1146  return cfqg->vdisktime - st->min_vdisktime;
1147 }
1148 
1149 static void
1150 __cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
1151 {
1152  struct rb_node **node = &st->rb.rb_node;
1153  struct rb_node *parent = NULL;
1154  struct cfq_group *__cfqg;
1155  s64 key = cfqg_key(st, cfqg);
1156  int left = 1;
1157 
1158  while (*node != NULL) {
1159  parent = *node;
1160  __cfqg = rb_entry_cfqg(parent);
1161 
1162  if (key < cfqg_key(st, __cfqg))
1163  node = &parent->rb_left;
1164  else {
1165  node = &parent->rb_right;
1166  left = 0;
1167  }
1168  }
1169 
1170  if (left)
1171  st->left = &cfqg->rb_node;
1172 
1173  rb_link_node(&cfqg->rb_node, parent, node);
1174  rb_insert_color(&cfqg->rb_node, &st->rb);
1175 }
1176 
1177 static void
1178 cfq_update_group_weight(struct cfq_group *cfqg)
1179 {
1180  BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
1181  if (cfqg->new_weight) {
1182  cfqg->weight = cfqg->new_weight;
1183  cfqg->new_weight = 0;
1184  }
1185 }
1186 
1187 static void
1188 cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
1189 {
1190  BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
1191 
1192  cfq_update_group_weight(cfqg);
1193  __cfq_group_service_tree_add(st, cfqg);
1194  st->total_weight += cfqg->weight;
1195 }
1196 
1197 static void
1198 cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
1199 {
1200  struct cfq_rb_root *st = &cfqd->grp_service_tree;
1201  struct cfq_group *__cfqg;
1202  struct rb_node *n;
1203 
1204  cfqg->nr_cfqq++;
1205  if (!RB_EMPTY_NODE(&cfqg->rb_node))
1206  return;
1207 
1208  /*
1209  * Currently put the group at the end. Later implement something
1210  * so that groups get a lower vtime based on their weights, so that
1211  * a group does not lose everything if it was not continuously backlogged.
1212  */
1213  n = rb_last(&st->rb);
1214  if (n) {
1215  __cfqg = rb_entry_cfqg(n);
1216  cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
1217  } else
1218  cfqg->vdisktime = st->min_vdisktime;
1219  cfq_group_service_tree_add(st, cfqg);
1220 }
1221 
1222 static void
1223 cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
1224 {
1225  st->total_weight -= cfqg->weight;
1226  if (!RB_EMPTY_NODE(&cfqg->rb_node))
1227  cfq_rb_erase(&cfqg->rb_node, st);
1228 }
1229 
1230 static void
1231 cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
1232 {
1233  struct cfq_rb_root *st = &cfqd->grp_service_tree;
1234 
1235  BUG_ON(cfqg->nr_cfqq < 1);
1236  cfqg->nr_cfqq--;
1237 
1238  /* If there are other cfq queues under this group, don't delete it */
1239  if (cfqg->nr_cfqq)
1240  return;
1241 
1242  cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
1243  cfq_group_service_tree_del(st, cfqg);
1244  cfqg->saved_workload_slice = 0;
1245  cfqg_stats_update_dequeue(cfqg);
1246 }
1247 
1248 static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
1249  unsigned int *unaccounted_time)
1250 {
1251  unsigned int slice_used;
1252 
1253  /*
1254  * Queue got expired before even a single request completed or
1255  * got expired immediately after first request completion.
1256  */
1257  if (!cfqq->slice_start || cfqq->slice_start == jiffies) {
1258  /*
1259  * Also charge the seek time incurred to the group, otherwise
1260  * if there are multiple queues in the group, each can dispatch
1261  * a single request on seeky media and cause lots of seek time
1262  * and the group will never know it.
1263  */
1264  slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start),
1265  1);
1266  } else {
1267  slice_used = jiffies - cfqq->slice_start;
1268  if (slice_used > cfqq->allocated_slice) {
1269  *unaccounted_time = slice_used - cfqq->allocated_slice;
1270  slice_used = cfqq->allocated_slice;
1271  }
1272  if (time_after(cfqq->slice_start, cfqq->dispatch_start))
1273  *unaccounted_time += cfqq->slice_start -
1274  cfqq->dispatch_start;
1275  }
1276 
1277  return slice_used;
1278 }
1279 
1280 static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
1281  struct cfq_queue *cfqq)
1282 {
1283  struct cfq_rb_root *st = &cfqd->grp_service_tree;
1284  unsigned int used_sl, charge, unaccounted_sl = 0;
1285  int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
1286  - cfqg->service_tree_idle.count;
1287 
1288  BUG_ON(nr_sync < 0);
1289  used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);
1290 
1291  if (iops_mode(cfqd))
1292  charge = cfqq->slice_dispatch;
1293  else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
1294  charge = cfqq->allocated_slice;
1295 
1296  /* Can't update vdisktime while group is on service tree */
1297  cfq_group_service_tree_del(st, cfqg);
1298  cfqg->vdisktime += cfq_scale_slice(charge, cfqg);
1299  /* If a new weight was requested, update now, off tree */
1300  cfq_group_service_tree_add(st, cfqg);
1301 
1302  /* This group is being expired. Save the context */
1303  if (time_after(cfqd->workload_expires, jiffies)) {
1304  cfqg->saved_workload_slice = cfqd->workload_expires
1305  - jiffies;
1306  cfqg->saved_workload = cfqd->serving_type;
1307  cfqg->saved_serving_prio = cfqd->serving_prio;
1308  } else
1309  cfqg->saved_workload_slice = 0;
1310 
1311  cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
1312  st->min_vdisktime);
1313  cfq_log_cfqq(cfqq->cfqd, cfqq,
1314  "sl_used=%u disp=%u charge=%u iops=%u sect=%lu",
1315  used_sl, cfqq->slice_dispatch, charge,
1316  iops_mode(cfqd), cfqq->nr_sectors);
1317  cfqg_stats_update_timeslice_used(cfqg, used_sl, unaccounted_sl);
1318  cfqg_stats_set_start_empty_time(cfqg);
1319 }
1320 
1328 static void cfq_init_cfqg_base(struct cfq_group *cfqg)
1329 {
1330  struct cfq_rb_root *st;
1331  int i, j;
1332 
1333  for_each_cfqg_st(cfqg, i, j, st)
1334  *st = CFQ_RB_ROOT;
1335  RB_CLEAR_NODE(&cfqg->rb_node);
1336 
1337  cfqg->ttime.last_end_request = jiffies;
1338 }
1339 
1340 #ifdef CONFIG_CFQ_GROUP_IOSCHED
1341 static void cfq_pd_init(struct blkcg_gq *blkg)
1342 {
1343  struct cfq_group *cfqg = blkg_to_cfqg(blkg);
1344 
1345  cfq_init_cfqg_base(cfqg);
1346  cfqg->weight = blkg->blkcg->cfq_weight;
1347 }
1348 
1349 /*
1350  * Search for the cfq group the current task belongs to. request_queue lock must
1351  * be held.
1352  */
1353 static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
1354  struct blkcg *blkcg)
1355 {
1356  struct request_queue *q = cfqd->queue;
1357  struct cfq_group *cfqg = NULL;
1358 
1359  /* avoid lookup for the common case where there's no blkcg */
1360  if (blkcg == &blkcg_root) {
1361  cfqg = cfqd->root_group;
1362  } else {
1363  struct blkcg_gq *blkg;
1364 
1365  blkg = blkg_lookup_create(blkcg, q);
1366  if (!IS_ERR(blkg))
1367  cfqg = blkg_to_cfqg(blkg);
1368  }
1369 
1370  return cfqg;
1371 }
1372 
1373 static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
1374 {
1375  /* Currently, all async queues are mapped to root group */
1376  if (!cfq_cfqq_sync(cfqq))
1377  cfqg = cfqq->cfqd->root_group;
1378 
1379  cfqq->cfqg = cfqg;
1380  /* cfqq reference on cfqg */
1381  cfqg_get(cfqg);
1382 }
1383 
1384 static u64 cfqg_prfill_weight_device(struct seq_file *sf,
1385  struct blkg_policy_data *pd, int off)
1386 {
1387  struct cfq_group *cfqg = pd_to_cfqg(pd);
1388 
1389  if (!cfqg->dev_weight)
1390  return 0;
1391  return __blkg_prfill_u64(sf, pd, cfqg->dev_weight);
1392 }
1393 
1394 static int cfqg_print_weight_device(struct cgroup *cgrp, struct cftype *cft,
1395  struct seq_file *sf)
1396 {
1397  blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp),
1398  cfqg_prfill_weight_device, &blkcg_policy_cfq, 0,
1399  false);
1400  return 0;
1401 }
1402 
1403 static int cfq_print_weight(struct cgroup *cgrp, struct cftype *cft,
1404  struct seq_file *sf)
1405 {
1406  seq_printf(sf, "%u\n", cgroup_to_blkcg(cgrp)->cfq_weight);
1407  return 0;
1408 }
1409 
1410 static int cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
1411  const char *buf)
1412 {
1413  struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
1414  struct blkg_conf_ctx ctx;
1415  struct cfq_group *cfqg;
1416  int ret;
1417 
1418  ret = blkg_conf_prep(blkcg, &blkcg_policy_cfq, buf, &ctx);
1419  if (ret)
1420  return ret;
1421 
1422  ret = -EINVAL;
1423  cfqg = blkg_to_cfqg(ctx.blkg);
1424  if (!ctx.v || (ctx.v >= CFQ_WEIGHT_MIN && ctx.v <= CFQ_WEIGHT_MAX)) {
1425  cfqg->dev_weight = ctx.v;
1426  cfqg->new_weight = cfqg->dev_weight ?: blkcg->cfq_weight;
1427  ret = 0;
1428  }
1429 
1430  blkg_conf_finish(&ctx);
1431  return ret;
1432 }
1433 
1434 static int cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
1435 {
1436  struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
1437  struct blkcg_gq *blkg;
1438  struct hlist_node *n;
1439 
1440  if (val < CFQ_WEIGHT_MIN || val > CFQ_WEIGHT_MAX)
1441  return -EINVAL;
1442 
1443  spin_lock_irq(&blkcg->lock);
1444  blkcg->cfq_weight = (unsigned int)val;
1445 
1446  hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
1447  struct cfq_group *cfqg = blkg_to_cfqg(blkg);
1448 
1449  if (cfqg && !cfqg->dev_weight)
1450  cfqg->new_weight = blkcg->cfq_weight;
1451  }
1452 
1453  spin_unlock_irq(&blkcg->lock);
1454  return 0;
1455 }
1456 
1457 static int cfqg_print_stat(struct cgroup *cgrp, struct cftype *cft,
1458  struct seq_file *sf)
1459 {
1460  struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
1461 
1462  blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat, &blkcg_policy_cfq,
1463  cft->private, false);
1464  return 0;
1465 }
1466 
1467 static int cfqg_print_rwstat(struct cgroup *cgrp, struct cftype *cft,
1468  struct seq_file *sf)
1469 {
1470  struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
1471 
1472  blkcg_print_blkgs(sf, blkcg, blkg_prfill_rwstat, &blkcg_policy_cfq,
1473  cft->private, true);
1474  return 0;
1475 }
1476 
1477 #ifdef CONFIG_DEBUG_BLK_CGROUP
1478 static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
1479  struct blkg_policy_data *pd, int off)
1480 {
1481  struct cfq_group *cfqg = pd_to_cfqg(pd);
1482  u64 samples = blkg_stat_read(&cfqg->stats.avg_queue_size_samples);
1483  u64 v = 0;
1484 
1485  if (samples) {
1486  v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum);
1487  do_div(v, samples);
1488  }
1489  __blkg_prfill_u64(sf, pd, v);
1490  return 0;
1491 }
1492 
1493 /* print avg_queue_size */
1494 static int cfqg_print_avg_queue_size(struct cgroup *cgrp, struct cftype *cft,
1495  struct seq_file *sf)
1496 {
1497  struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
1498 
1499  blkcg_print_blkgs(sf, blkcg, cfqg_prfill_avg_queue_size,
1500  &blkcg_policy_cfq, 0, false);
1501  return 0;
1502 }
1503 #endif /* CONFIG_DEBUG_BLK_CGROUP */
1504 
1505 static struct cftype cfq_blkcg_files[] = {
1506  {
1507  .name = "weight_device",
1508  .read_seq_string = cfqg_print_weight_device,
1509  .write_string = cfqg_set_weight_device,
1510  .max_write_len = 256,
1511  },
1512  {
1513  .name = "weight",
1514  .read_seq_string = cfq_print_weight,
1515  .write_u64 = cfq_set_weight,
1516  },
1517  {
1518  .name = "time",
1519  .private = offsetof(struct cfq_group, stats.time),
1520  .read_seq_string = cfqg_print_stat,
1521  },
1522  {
1523  .name = "sectors",
1524  .private = offsetof(struct cfq_group, stats.sectors),
1525  .read_seq_string = cfqg_print_stat,
1526  },
1527  {
1528  .name = "io_service_bytes",
1529  .private = offsetof(struct cfq_group, stats.service_bytes),
1530  .read_seq_string = cfqg_print_rwstat,
1531  },
1532  {
1533  .name = "io_serviced",
1534  .private = offsetof(struct cfq_group, stats.serviced),
1535  .read_seq_string = cfqg_print_rwstat,
1536  },
1537  {
1538  .name = "io_service_time",
1539  .private = offsetof(struct cfq_group, stats.service_time),
1540  .read_seq_string = cfqg_print_rwstat,
1541  },
1542  {
1543  .name = "io_wait_time",
1544  .private = offsetof(struct cfq_group, stats.wait_time),
1545  .read_seq_string = cfqg_print_rwstat,
1546  },
1547  {
1548  .name = "io_merged",
1549  .private = offsetof(struct cfq_group, stats.merged),
1550  .read_seq_string = cfqg_print_rwstat,
1551  },
1552  {
1553  .name = "io_queued",
1554  .private = offsetof(struct cfq_group, stats.queued),
1555  .read_seq_string = cfqg_print_rwstat,
1556  },
1557 #ifdef CONFIG_DEBUG_BLK_CGROUP
1558  {
1559  .name = "avg_queue_size",
1560  .read_seq_string = cfqg_print_avg_queue_size,
1561  },
1562  {
1563  .name = "group_wait_time",
1564  .private = offsetof(struct cfq_group, stats.group_wait_time),
1565  .read_seq_string = cfqg_print_stat,
1566  },
1567  {
1568  .name = "idle_time",
1569  .private = offsetof(struct cfq_group, stats.idle_time),
1570  .read_seq_string = cfqg_print_stat,
1571  },
1572  {
1573  .name = "empty_time",
1574  .private = offsetof(struct cfq_group, stats.empty_time),
1575  .read_seq_string = cfqg_print_stat,
1576  },
1577  {
1578  .name = "dequeue",
1579  .private = offsetof(struct cfq_group, stats.dequeue),
1580  .read_seq_string = cfqg_print_stat,
1581  },
1582  {
1583  .name = "unaccounted_time",
1584  .private = offsetof(struct cfq_group, stats.unaccounted_time),
1585  .read_seq_string = cfqg_print_stat,
1586  },
1587 #endif /* CONFIG_DEBUG_BLK_CGROUP */
1588  { } /* terminate */
1589 };
1590 #else /* GROUP_IOSCHED */
1591 static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
1592  struct blkcg *blkcg)
1593 {
1594  return cfqd->root_group;
1595 }
1596 
1597 static inline void
1598 cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
1599  cfqq->cfqg = cfqg;
1600 }
1601 
1602 #endif /* GROUP_IOSCHED */
1603 
1604 /*
1605  * The cfqd->service_trees holds all pending cfq_queue's that have
1606  * requests waiting to be processed. It is sorted in the order that
1607  * we will service the queues.
1608  */
1609 static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1610  bool add_front)
1611 {
1612  struct rb_node **p, *parent;
1613  struct cfq_queue *__cfqq;
1614  unsigned long rb_key;
1615  struct cfq_rb_root *service_tree;
1616  int left;
1617  int new_cfqq = 1;
1618 
1619  service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
1620  cfqq_type(cfqq));
1621  if (cfq_class_idle(cfqq)) {
1622  rb_key = CFQ_IDLE_DELAY;
1623  parent = rb_last(&service_tree->rb);
1624  if (parent && parent != &cfqq->rb_node) {
1625  __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
1626  rb_key += __cfqq->rb_key;
1627  } else
1628  rb_key += jiffies;
1629  } else if (!add_front) {
1630  /*
1631  * Get our rb key offset. Subtract any residual slice
1632  * value carried from last service. A negative resid
1633  * count indicates slice overrun, and this should position
1634  * the next service time further away in the tree.
1635  */
1636  rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
1637  rb_key -= cfqq->slice_resid;
1638  cfqq->slice_resid = 0;
1639  } else {
1640  rb_key = -HZ;
1641  __cfqq = cfq_rb_first(service_tree);
1642  rb_key += __cfqq ? __cfqq->rb_key : jiffies;
1643  }
1644 
1645  if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
1646  new_cfqq = 0;
1647  /*
1648  * same position, nothing more to do
1649  */
1650  if (rb_key == cfqq->rb_key &&
1651  cfqq->service_tree == service_tree)
1652  return;
1653 
1654  cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
1655  cfqq->service_tree = NULL;
1656  }
1657 
1658  left = 1;
1659  parent = NULL;
1660  cfqq->service_tree = service_tree;
1661  p = &service_tree->rb.rb_node;
1662  while (*p) {
1663  struct rb_node **n;
1664 
1665  parent = *p;
1666  __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
1667 
1668  /*
1669  * sort by key, that represents service time.
1670  */
1671  if (time_before(rb_key, __cfqq->rb_key))
1672  n = &(*p)->rb_left;
1673  else {
1674  n = &(*p)->rb_right;
1675  left = 0;
1676  }
1677 
1678  p = n;
1679  }
1680 
1681  if (left)
1682  service_tree->left = &cfqq->rb_node;
1683 
1684  cfqq->rb_key = rb_key;
1685  rb_link_node(&cfqq->rb_node, parent, p);
1686  rb_insert_color(&cfqq->rb_node, &service_tree->rb);
1687  service_tree->count++;
1688  if (add_front || !new_cfqq)
1689  return;
1690  cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
1691 }
1692 
1693 static struct cfq_queue *
1694 cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
1695  sector_t sector, struct rb_node **ret_parent,
1696  struct rb_node ***rb_link)
1697 {
1698  struct rb_node **p, *parent;
1699  struct cfq_queue *cfqq = NULL;
1700 
1701  parent = NULL;
1702  p = &root->rb_node;
1703  while (*p) {
1704  struct rb_node **n;
1705 
1706  parent = *p;
1707  cfqq = rb_entry(parent, struct cfq_queue, p_node);
1708 
1709  /*
1710  * Sort strictly based on sector. Smallest to the left,
1711  * largest to the right.
1712  */
1713  if (sector > blk_rq_pos(cfqq->next_rq))
1714  n = &(*p)->rb_right;
1715  else if (sector < blk_rq_pos(cfqq->next_rq))
1716  n = &(*p)->rb_left;
1717  else
1718  break;
1719  p = n;
1720  cfqq = NULL;
1721  }
1722 
1723  *ret_parent = parent;
1724  if (rb_link)
1725  *rb_link = p;
1726  return cfqq;
1727 }
1728 
1729 static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1730 {
1731  struct rb_node **p, *parent;
1732  struct cfq_queue *__cfqq;
1733 
1734  if (cfqq->p_root) {
1735  rb_erase(&cfqq->p_node, cfqq->p_root);
1736  cfqq->p_root = NULL;
1737  }
1738 
1739  if (cfq_class_idle(cfqq))
1740  return;
1741  if (!cfqq->next_rq)
1742  return;
1743 
1744  cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
1745  __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
1746  blk_rq_pos(cfqq->next_rq), &parent, &p);
1747  if (!__cfqq) {
1748  rb_link_node(&cfqq->p_node, parent, p);
1749  rb_insert_color(&cfqq->p_node, cfqq->p_root);
1750  } else
1751  cfqq->p_root = NULL;
1752 }
1753 
1754 /*
1755  * Update cfqq's position in the service tree.
1756  */
1757 static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1758 {
1759  /*
1760  * Resorting requires the cfqq to be on the RR list already.
1761  */
1762  if (cfq_cfqq_on_rr(cfqq)) {
1763  cfq_service_tree_add(cfqd, cfqq, 0);
1764  cfq_prio_tree_add(cfqd, cfqq);
1765  }
1766 }
1767 
1768 /*
1769  * add to busy list of queues for service, trying to be fair in ordering
1770  * the pending list according to last request service
1771  */
1772 static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1773 {
1774  cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
1775  BUG_ON(cfq_cfqq_on_rr(cfqq));
1776  cfq_mark_cfqq_on_rr(cfqq);
1777  cfqd->busy_queues++;
1778  if (cfq_cfqq_sync(cfqq))
1779  cfqd->busy_sync_queues++;
1780 
1781  cfq_resort_rr_list(cfqd, cfqq);
1782 }
1783 
1784 /*
1785  * Called when the cfqq no longer has requests pending, remove it from
1786  * the service tree.
1787  */
1788 static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1789 {
1790  cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
1791  BUG_ON(!cfq_cfqq_on_rr(cfqq));
1792  cfq_clear_cfqq_on_rr(cfqq);
1793 
1794  if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
1795  cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
1796  cfqq->service_tree = NULL;
1797  }
1798  if (cfqq->p_root) {
1799  rb_erase(&cfqq->p_node, cfqq->p_root);
1800  cfqq->p_root = NULL;
1801  }
1802 
1803  cfq_group_notify_queue_del(cfqd, cfqq->cfqg);
1804  BUG_ON(!cfqd->busy_queues);
1805  cfqd->busy_queues--;
1806  if (cfq_cfqq_sync(cfqq))
1807  cfqd->busy_sync_queues--;
1808 }
1809 
1810 /*
1811  * rb tree support functions
1812  */
1813 static void cfq_del_rq_rb(struct request *rq)
1814 {
1815  struct cfq_queue *cfqq = RQ_CFQQ(rq);
1816  const int sync = rq_is_sync(rq);
1817 
1818  BUG_ON(!cfqq->queued[sync]);
1819  cfqq->queued[sync]--;
1820 
1821  elv_rb_del(&cfqq->sort_list, rq);
1822 
1823  if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
1824  /*
1825  * Queue will be deleted from service tree when we actually
1826  * expire it later. Right now just remove it from prio tree
1827  * as it is empty.
1828  */
1829  if (cfqq->p_root) {
1830  rb_erase(&cfqq->p_node, cfqq->p_root);
1831  cfqq->p_root = NULL;
1832  }
1833  }
1834 }
1835 
1836 static void cfq_add_rq_rb(struct request *rq)
1837 {
1838  struct cfq_queue *cfqq = RQ_CFQQ(rq);
1839  struct cfq_data *cfqd = cfqq->cfqd;
1840  struct request *prev;
1841 
1842  cfqq->queued[rq_is_sync(rq)]++;
1843 
1844  elv_rb_add(&cfqq->sort_list, rq);
1845 
1846  if (!cfq_cfqq_on_rr(cfqq))
1847  cfq_add_cfqq_rr(cfqd, cfqq);
1848 
1849  /*
1850  * check if this request is a better next-serve candidate
1851  */
1852  prev = cfqq->next_rq;
1853  cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);
1854 
1855  /*
1856  * adjust priority tree position, if ->next_rq changes
1857  */
1858  if (prev != cfqq->next_rq)
1859  cfq_prio_tree_add(cfqd, cfqq);
1860 
1861  BUG_ON(!cfqq->next_rq);
1862 }
1863 
1864 static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
1865 {
1866  elv_rb_del(&cfqq->sort_list, rq);
1867  cfqq->queued[rq_is_sync(rq)]--;
1868  cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
1869  cfq_add_rq_rb(rq);
1870  cfqg_stats_update_io_add(RQ_CFQG(rq), cfqq->cfqd->serving_group,
1871  rq->cmd_flags);
1872 }
1873 
1874 static struct request *
1875 cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
1876 {
1877  struct task_struct *tsk = current;
1878  struct cfq_io_cq *cic;
1879  struct cfq_queue *cfqq;
1880 
1881  cic = cfq_cic_lookup(cfqd, tsk->io_context);
1882  if (!cic)
1883  return NULL;
1884 
1885  cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
1886  if (cfqq) {
1887  sector_t sector = bio->bi_sector + bio_sectors(bio);
1888 
1889  return elv_rb_find(&cfqq->sort_list, sector);
1890  }
1891 
1892  return NULL;
1893 }
1894 
1895 static void cfq_activate_request(struct request_queue *q, struct request *rq)
1896 {
1897  struct cfq_data *cfqd = q->elevator->elevator_data;
1898 
1899  cfqd->rq_in_driver++;
1900  cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
1901  cfqd->rq_in_driver);
1902 
1903  cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
1904 }
1905 
1906 static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
1907 {
1908  struct cfq_data *cfqd = q->elevator->elevator_data;
1909 
1910  WARN_ON(!cfqd->rq_in_driver);
1911  cfqd->rq_in_driver--;
1912  cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
1913  cfqd->rq_in_driver);
1914 }
1915 
1916 static void cfq_remove_request(struct request *rq)
1917 {
1918  struct cfq_queue *cfqq = RQ_CFQQ(rq);
1919 
1920  if (cfqq->next_rq == rq)
1921  cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
1922 
1923  list_del_init(&rq->queuelist);
1924  cfq_del_rq_rb(rq);
1925 
1926  cfqq->cfqd->rq_queued--;
1927  cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
1928  if (rq->cmd_flags & REQ_PRIO) {
1929  WARN_ON(!cfqq->prio_pending);
1930  cfqq->prio_pending--;
1931  }
1932 }
1933 
1934 static int cfq_merge(struct request_queue *q, struct request **req,
1935  struct bio *bio)
1936 {
1937  struct cfq_data *cfqd = q->elevator->elevator_data;
1938  struct request *__rq;
1939 
1940  __rq = cfq_find_rq_fmerge(cfqd, bio);
1941  if (__rq && elv_rq_merge_ok(__rq, bio)) {
1942  *req = __rq;
1943  return ELEVATOR_FRONT_MERGE;
1944  }
1945 
1946  return ELEVATOR_NO_MERGE;
1947 }
1948 
1949 static void cfq_merged_request(struct request_queue *q, struct request *req,
1950  int type)
1951 {
1952  if (type == ELEVATOR_FRONT_MERGE) {
1953  struct cfq_queue *cfqq = RQ_CFQQ(req);
1954 
1955  cfq_reposition_rq_rb(cfqq, req);
1956  }
1957 }
1958 
1959 static void cfq_bio_merged(struct request_queue *q, struct request *req,
1960  struct bio *bio)
1961 {
1962  cfqg_stats_update_io_merged(RQ_CFQG(req), bio->bi_rw);
1963 }
1964 
1965 static void
1966 cfq_merged_requests(struct request_queue *q, struct request *rq,
1967  struct request *next)
1968 {
1969  struct cfq_queue *cfqq = RQ_CFQQ(rq);
1970  struct cfq_data *cfqd = q->elevator->elevator_data;
1971 
1972  /*
1973  * reposition in fifo if next is older than rq
1974  */
1975  if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
1976  time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
1977  list_move(&rq->queuelist, &next->queuelist);
1978  rq_set_fifo_time(rq, rq_fifo_time(next));
1979  }
1980 
1981  if (cfqq->next_rq == next)
1982  cfqq->next_rq = rq;
1983  cfq_remove_request(next);
1984  cfqg_stats_update_io_merged(RQ_CFQG(rq), next->cmd_flags);
1985 
1986  cfqq = RQ_CFQQ(next);
1987  /*
1988  * all requests of this queue are merged to other queues, delete it
1989  * from the service tree. If it's the active_queue,
1990  * cfq_dispatch_requests() will choose to expire it or do idle
1991  */
1992  if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) &&
1993  cfqq != cfqd->active_queue)
1994  cfq_del_cfqq_rr(cfqd, cfqq);
1995 }
1996 
1997 static int cfq_allow_merge(struct request_queue *q, struct request *rq,
1998  struct bio *bio)
1999 {
2000  struct cfq_data *cfqd = q->elevator->elevator_data;
2001  struct cfq_io_cq *cic;
2002  struct cfq_queue *cfqq;
2003 
2004  /*
2005  * Disallow merge of a sync bio into an async request.
2006  */
2007  if (cfq_bio_sync(bio) && !rq_is_sync(rq))
2008  return false;
2009 
2010  /*
2011  * Lookup the cfqq that this bio will be queued with and allow
2012  * merge only if rq is queued there.
2013  */
2014  cic = cfq_cic_lookup(cfqd, current->io_context);
2015  if (!cic)
2016  return false;
2017 
2018  cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
2019  return cfqq == RQ_CFQQ(rq);
2020 }
2021 
2022 static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2023 {
2024  del_timer(&cfqd->idle_slice_timer);
2025  cfqg_stats_update_idle_time(cfqq->cfqg);
2026 }
2027 
2028 static void __cfq_set_active_queue(struct cfq_data *cfqd,
2029  struct cfq_queue *cfqq)
2030 {
2031  if (cfqq) {
2032  cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
2033  cfqd->serving_prio, cfqd->serving_type);
2034  cfqg_stats_update_avg_queue_size(cfqq->cfqg);
2035  cfqq->slice_start = 0;
2036  cfqq->dispatch_start = jiffies;
2037  cfqq->allocated_slice = 0;
2038  cfqq->slice_end = 0;
2039  cfqq->slice_dispatch = 0;
2040  cfqq->nr_sectors = 0;
2041 
2042  cfq_clear_cfqq_wait_request(cfqq);
2043  cfq_clear_cfqq_must_dispatch(cfqq);
2044  cfq_clear_cfqq_must_alloc_slice(cfqq);
2045  cfq_clear_cfqq_fifo_expire(cfqq);
2046  cfq_mark_cfqq_slice_new(cfqq);
2047 
2048  cfq_del_timer(cfqd, cfqq);
2049  }
2050 
2051  cfqd->active_queue = cfqq;
2052 }
2053 
2054 /*
2055  * current cfqq expired its slice (or was too idle), select new one
2056  */
2057 static void
2058 __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2059  bool timed_out)
2060 {
2061  cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
2062 
2063  if (cfq_cfqq_wait_request(cfqq))
2064  cfq_del_timer(cfqd, cfqq);
2065 
2066  cfq_clear_cfqq_wait_request(cfqq);
2067  cfq_clear_cfqq_wait_busy(cfqq);
2068 
2069  /*
2070  * If this cfqq is shared between multiple processes, check to
2071  * make sure that those processes are still issuing I/Os within
2072  * the mean seek distance. If not, it may be time to break the
2073  * queues apart again.
2074  */
2075  if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
2076  cfq_mark_cfqq_split_coop(cfqq);
2077 
2078  /*
2079  * store what was left of this slice, if the queue idled/timed out
2080  */
2081  if (timed_out) {
2082  if (cfq_cfqq_slice_new(cfqq))
2083  cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
2084  else
2085  cfqq->slice_resid = cfqq->slice_end - jiffies;
2086  cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
2087  }
2088 
2089  cfq_group_served(cfqd, cfqq->cfqg, cfqq);
2090 
2091  if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
2092  cfq_del_cfqq_rr(cfqd, cfqq);
2093 
2094  cfq_resort_rr_list(cfqd, cfqq);
2095 
2096  if (cfqq == cfqd->active_queue)
2097  cfqd->active_queue = NULL;
2098 
2099  if (cfqd->active_cic) {
2100  put_io_context(cfqd->active_cic->icq.ioc);
2101  cfqd->active_cic = NULL;
2102  }
2103 }
2104 
2105 static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
2106 {
2107  struct cfq_queue *cfqq = cfqd->active_queue;
2108 
2109  if (cfqq)
2110  __cfq_slice_expired(cfqd, cfqq, timed_out);
2111 }
2112 
2113 /*
2114  * Get next queue for service. Unless we have a queue preemption,
2115  * we'll simply select the first cfqq in the service tree.
2116  */
2117 static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
2118 {
2119  struct cfq_rb_root *service_tree =
2120  service_tree_for(cfqd->serving_group, cfqd->serving_prio,
2121  cfqd->serving_type);
2122 
2123  if (!cfqd->rq_queued)
2124  return NULL;
2125 
2126  /* There is nothing to dispatch */
2127  if (!service_tree)
2128  return NULL;
2129  if (RB_EMPTY_ROOT(&service_tree->rb))
2130  return NULL;
2131  return cfq_rb_first(service_tree);
2132 }
2133 
2134 static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
2135 {
2136  struct cfq_group *cfqg;
2137  struct cfq_queue *cfqq;
2138  int i, j;
2139  struct cfq_rb_root *st;
2140 
2141  if (!cfqd->rq_queued)
2142  return NULL;
2143 
2144  cfqg = cfq_get_next_cfqg(cfqd);
2145  if (!cfqg)
2146  return NULL;
2147 
2148  for_each_cfqg_st(cfqg, i, j, st)
2149  if ((cfqq = cfq_rb_first(st)) != NULL)
2150  return cfqq;
2151  return NULL;
2152 }
2153 
2154 /*
2155  * Get and set a new active queue for service.
2156  */
2157 static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
2158  struct cfq_queue *cfqq)
2159 {
2160  if (!cfqq)
2161  cfqq = cfq_get_next_queue(cfqd);
2162 
2163  __cfq_set_active_queue(cfqd, cfqq);
2164  return cfqq;
2165 }
2166 
2167 static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
2168  struct request *rq)
2169 {
2170  if (blk_rq_pos(rq) >= cfqd->last_position)
2171  return blk_rq_pos(rq) - cfqd->last_position;
2172  else
2173  return cfqd->last_position - blk_rq_pos(rq);
2174 }
2175 
2176 static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2177  struct request *rq)
2178 {
2179  return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
2180 }
2181 
2182 static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
2183  struct cfq_queue *cur_cfqq)
2184 {
2185  struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
2186  struct rb_node *parent, *node;
2187  struct cfq_queue *__cfqq;
2188  sector_t sector = cfqd->last_position;
2189 
2190  if (RB_EMPTY_ROOT(root))
2191  return NULL;
2192 
2193  /*
2194  * First, if we find a request starting at the end of the last
2195  * request, choose it.
2196  */
2197  __cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
2198  if (__cfqq)
2199  return __cfqq;
2200 
2201  /*
2202  * If the exact sector wasn't found, the parent of the NULL leaf
2203  * will contain the closest sector.
2204  */
2205  __cfqq = rb_entry(parent, struct cfq_queue, p_node);
2206  if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
2207  return __cfqq;
2208 
2209  if (blk_rq_pos(__cfqq->next_rq) < sector)
2210  node = rb_next(&__cfqq->p_node);
2211  else
2212  node = rb_prev(&__cfqq->p_node);
2213  if (!node)
2214  return NULL;
2215 
2216  __cfqq = rb_entry(node, struct cfq_queue, p_node);
2217  if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
2218  return __cfqq;
2219 
2220  return NULL;
2221 }
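/*
 * Note on the lookup above: an exact match on last_position is returned
 * unconditionally; otherwise the parent node, and then its neighbour in
 * the direction of last_position, is accepted only if its next request
 * lies within CFQQ_CLOSE_THR (8 * 1024 sectors, i.e. 4 MiB with 512-byte
 * sectors) of the last dispatched position.
 */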
2222 
2223 /*
2224  * cfqd - obvious
2225  * cur_cfqq - passed in so that we don't decide that the current queue is
2226  * closely cooperating with itself.
2227  *
2228  * So, basically we're assuming that cur_cfqq has dispatched at least
2229  * one request, and that cfqd->last_position reflects a position on the disk
2230  * associated with the I/O issued by cur_cfqq. I'm not sure this is a valid
2231  * assumption.
2232  */
2233 static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
2234  struct cfq_queue *cur_cfqq)
2235 {
2236  struct cfq_queue *cfqq;
2237 
2238  if (cfq_class_idle(cur_cfqq))
2239  return NULL;
2240  if (!cfq_cfqq_sync(cur_cfqq))
2241  return NULL;
2242  if (CFQQ_SEEKY(cur_cfqq))
2243  return NULL;
2244 
2245  /*
2246  * Don't search priority tree if it's the only queue in the group.
2247  */
2248  if (cur_cfqq->cfqg->nr_cfqq == 1)
2249  return NULL;
2250 
2251  /*
2252  * We should notice if some of the queues are cooperating, eg
2253  * working closely on the same area of the disk. In that case,
2254  * we can group them together and not waste time idling.
2255  */
2256  cfqq = cfqq_close(cfqd, cur_cfqq);
2257  if (!cfqq)
2258  return NULL;
2259 
2260  /* If new queue belongs to different cfq_group, don't choose it */
2261  if (cur_cfqq->cfqg != cfqq->cfqg)
2262  return NULL;
2263 
2264  /*
2265  * It only makes sense to merge sync queues.
2266  */
2267  if (!cfq_cfqq_sync(cfqq))
2268  return NULL;
2269  if (CFQQ_SEEKY(cfqq))
2270  return NULL;
2271 
2272  /*
2273  * Do not merge queues of different priority classes
2274  */
2275  if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
2276  return NULL;
2277 
2278  return cfqq;
2279 }
2280 
2281 /*
2282  * Determine whether we should enforce idle window for this queue.
2283  */
2284 
2285 static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2286 {
2287  enum wl_prio_t prio = cfqq_prio(cfqq);
2288  struct cfq_rb_root *service_tree = cfqq->service_tree;
2289 
2290  BUG_ON(!service_tree);
2291  BUG_ON(!service_tree->count);
2292 
2293  if (!cfqd->cfq_slice_idle)
2294  return false;
2295 
2296  /* We never do for idle class queues. */
2297  if (prio == IDLE_WORKLOAD)
2298  return false;
2299 
2300  /* We do for queues that were marked with idle window flag. */
2301  if (cfq_cfqq_idle_window(cfqq) &&
2302  !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
2303  return true;
2304 
2305  /*
2306  * Otherwise, we do only if they are the last ones
2307  * in their service tree.
2308  */
2309  if (service_tree->count == 1 && cfq_cfqq_sync(cfqq) &&
2310  !cfq_io_thinktime_big(cfqd, &service_tree->ttime, false))
2311  return true;
2312  cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d",
2313  service_tree->count);
2314  return false;
2315 }
2316 
2317 static void cfq_arm_slice_timer(struct cfq_data *cfqd)
2318 {
2319  struct cfq_queue *cfqq = cfqd->active_queue;
2320  struct cfq_io_cq *cic;
2321  unsigned long sl, group_idle = 0;
2322 
2323  /*
2324  * SSD device without seek penalty, disable idling. But only do so
2325  * for devices that support queuing, otherwise we still have a problem
2326  * with sync vs async workloads.
2327  */
2328  if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
2329  return;
2330 
2331  WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
2332  WARN_ON(cfq_cfqq_slice_new(cfqq));
2333 
2334  /*
2335  * idle is disabled, either manually or by past process history
2336  */
2337  if (!cfq_should_idle(cfqd, cfqq)) {
2338  /* no queue idling. Check for group idling */
2339  if (cfqd->cfq_group_idle)
2340  group_idle = cfqd->cfq_group_idle;
2341  else
2342  return;
2343  }
2344 
2345  /*
2346  * still active requests from this queue, don't idle
2347  */
2348  if (cfqq->dispatched)
2349  return;
2350 
2351  /*
2352  * task has exited, don't wait
2353  */
2354  cic = cfqd->active_cic;
2355  if (!cic || !atomic_read(&cic->icq.ioc->active_ref))
2356  return;
2357 
2358  /*
2359  * If our average think time is larger than the remaining time
2360  * slice, then don't idle. This avoids overrunning the allotted
2361  * time slice.
2362  */
2363  if (sample_valid(cic->ttime.ttime_samples) &&
2364  (cfqq->slice_end - jiffies < cic->ttime.ttime_mean)) {
2365  cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%lu",
2366  cic->ttime.ttime_mean);
2367  return;
2368  }
2369 
2370  /* There are other queues in the group, don't do group idle */
2371  if (group_idle && cfqq->cfqg->nr_cfqq > 1)
2372  return;
2373 
2374  cfq_mark_cfqq_wait_request(cfqq);
2375 
2376  if (group_idle)
2377  sl = cfqd->cfq_group_idle;
2378  else
2379  sl = cfqd->cfq_slice_idle;
2380 
2381  mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
2382  cfqg_stats_set_start_idle_time(cfqq->cfqg);
2383  cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
2384  group_idle ? 1 : 0);
2385 }
2386 
2387 /*
2388  * Move request from internal lists to the request queue dispatch list.
2389  */
2390 static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
2391 {
2392  struct cfq_data *cfqd = q->elevator->elevator_data;
2393  struct cfq_queue *cfqq = RQ_CFQQ(rq);
2394 
2395  cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
2396 
2397  cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
2398  cfq_remove_request(rq);
2399  cfqq->dispatched++;
2400  (RQ_CFQG(rq))->dispatched++;
2401  elv_dispatch_sort(q, rq);
2402 
2403  cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
2404  cfqq->nr_sectors += blk_rq_sectors(rq);
2405  cfqg_stats_update_dispatch(cfqq->cfqg, blk_rq_bytes(rq), rq->cmd_flags);
2406 }
2407 
2408 /*
2409  * return expired entry, or NULL to just start from scratch in rbtree
2410  */
2411 static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
2412 {
2413  struct request *rq = NULL;
2414 
2415  if (cfq_cfqq_fifo_expire(cfqq))
2416  return NULL;
2417 
2418  cfq_mark_cfqq_fifo_expire(cfqq);
2419 
2420  if (list_empty(&cfqq->fifo))
2421  return NULL;
2422 
2423  rq = rq_entry_fifo(cfqq->fifo.next);
2424  if (time_before(jiffies, rq_fifo_time(rq)))
2425  rq = NULL;
2426 
2427  cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
2428  return rq;
2429 }
2430 
2431 static inline int
2432 cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2433 {
2434  const int base_rq = cfqd->cfq_slice_async_rq;
2435 
2436  WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
2437 
2438  return 2 * base_rq * (IOPRIO_BE_NR - cfqq->ioprio);
2439 }
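/*
 * For example, with the default cfq_slice_async_rq of 2, an ioprio 0
 * (highest best-effort) queue may dispatch up to 2 * 2 * 8 = 32 requests
 * in one slice round, while ioprio 7 is capped at 2 * 2 * 1 = 4.
 */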
2440 
2441 /*
2442  * Must be called with the queue_lock held.
2443  */
2444 static int cfqq_process_refs(struct cfq_queue *cfqq)
2445 {
2446  int process_refs, io_refs;
2447 
2448  io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
2449  process_refs = cfqq->ref - io_refs;
2450  BUG_ON(process_refs < 0);
2451  return process_refs;
2452 }
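/*
 * Every request currently allocated against the queue (allocated[READ] +
 * allocated[WRITE]) holds one reference on it, so subtracting those I/O
 * references from the total leaves only the references held by processes.
 */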
2453 
2454 static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
2455 {
2456  int process_refs, new_process_refs;
2457  struct cfq_queue *__cfqq;
2458 
2459  /*
2460  * If there are no process references on the new_cfqq, then it is
2461  * unsafe to follow the ->new_cfqq chain as other cfqq's in the
2462  * chain may have dropped their last reference (not just their
2463  * last process reference).
2464  */
2465  if (!cfqq_process_refs(new_cfqq))
2466  return;
2467 
2468  /* Avoid a circular list and skip interim queue merges */
2469  while ((__cfqq = new_cfqq->new_cfqq)) {
2470  if (__cfqq == cfqq)
2471  return;
2472  new_cfqq = __cfqq;
2473  }
2474 
2475  process_refs = cfqq_process_refs(cfqq);
2476  new_process_refs = cfqq_process_refs(new_cfqq);
2477  /*
2478  * If the process for the cfqq has gone away, there is no
2479  * sense in merging the queues.
2480  */
2481  if (process_refs == 0 || new_process_refs == 0)
2482  return;
2483 
2484  /*
2485  * Merge in the direction of the lesser amount of work.
2486  */
2487  if (new_process_refs >= process_refs) {
2488  cfqq->new_cfqq = new_cfqq;
2489  new_cfqq->ref += process_refs;
2490  } else {
2491  new_cfqq->new_cfqq = cfqq;
2492  cfqq->ref += new_process_refs;
2493  }
2494 }
2495 
2496 static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
2497  struct cfq_group *cfqg, enum wl_prio_t prio)
2498 {
2499  struct cfq_queue *queue;
2500  int i;
2501  bool key_valid = false;
2502  unsigned long lowest_key = 0;
2503  enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
2504 
2505  for (i = 0; i <= SYNC_WORKLOAD; ++i) {
2506  /* select the one with lowest rb_key */
2507  queue = cfq_rb_first(service_tree_for(cfqg, prio, i));
2508  if (queue &&
2509  (!key_valid || time_before(queue->rb_key, lowest_key))) {
2510  lowest_key = queue->rb_key;
2511  cur_best = i;
2512  key_valid = true;
2513  }
2514  }
2515 
2516  return cur_best;
2517 }
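/*
 * In other words: among the workload types of the given priority class,
 * pick the one whose first pending queue has the earliest rb_key, i.e.
 * the type that has been waiting the longest for service.
 */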
2518 
2519 static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
2520 {
2521  unsigned slice;
2522  unsigned count;
2523  struct cfq_rb_root *st;
2524  unsigned group_slice;
2525  enum wl_prio_t original_prio = cfqd->serving_prio;
2526 
2527  /* Choose next priority. RT > BE > IDLE */
2528  if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
2529  cfqd->serving_prio = RT_WORKLOAD;
2530  else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
2531  cfqd->serving_prio = BE_WORKLOAD;
2532  else {
2533  cfqd->serving_prio = IDLE_WORKLOAD;
2534  cfqd->workload_expires = jiffies + 1;
2535  return;
2536  }
2537 
2538  if (original_prio != cfqd->serving_prio)
2539  goto new_workload;
2540 
2541  /*
2542  * For RT and BE, we have to choose also the type
2543  * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
2544  * expiration time
2545  */
2546  st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
2547  count = st->count;
2548 
2549  /*
2550  * check workload expiration, and that we still have other queues ready
2551  */
2552  if (count && !time_after(jiffies, cfqd->workload_expires))
2553  return;
2554 
2555 new_workload:
2556  /* otherwise select new workload type */
2557  cfqd->serving_type =
2558  cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio);
2559  st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
2560  count = st->count;
2561 
2562  /*
2563  * the workload slice is computed as a fraction of target latency
2564  * proportional to the number of queues in that workload, over
2565  * all the queues in the same priority class
2566  */
2567  group_slice = cfq_group_slice(cfqd, cfqg);
2568 
2569  slice = group_slice * count /
2570  max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_prio],
2571  cfq_group_busy_queues_wl(cfqd->serving_prio, cfqd, cfqg));
2572 
2573  if (cfqd->serving_type == ASYNC_WORKLOAD) {
2574  unsigned int tmp;
2575 
2576  /*
2577  * Async queues are currently system wide. Just taking
2578  * the proportion of queues within the same group will lead to a
2579  * higher async ratio system wide, as the root group generally
2580  * has a higher weight. A more accurate approach would be to
2581  * calculate the system wide async/sync ratio.
2582  */
2583  tmp = cfqd->cfq_target_latency *
2584  cfqg_busy_async_queues(cfqd, cfqg);
2585  tmp = tmp/cfqd->busy_queues;
2586  slice = min_t(unsigned, slice, tmp);
2587 
2588  /* async workload slice is scaled down according to
2589  * the sync/async slice ratio. */
2590  slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
2591  } else
2592  /* sync workload slice is at least 2 * cfq_slice_idle */
2593  slice = max(slice, 2 * cfqd->cfq_slice_idle);
2594 
2595  slice = max_t(unsigned, slice, CFQ_MIN_TT);
2596  cfq_log(cfqd, "workload slice:%d", slice);
2597  cfqd->workload_expires = jiffies + slice;
2598 }
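/*
 * Summary of the slice computation above:
 *
 *   slice = group_slice * count /
 *           max(busy_queues_avg[prio], busy queues of this prio class)
 *
 * Async slices are additionally capped by the system-wide async share
 * and scaled by the async/sync slice ratio (cfq_slice[0] / cfq_slice[1]);
 * sync slices get at least 2 * cfq_slice_idle; everything is floored at
 * CFQ_MIN_TT.
 */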
2599 
2600 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
2601 {
2602  struct cfq_rb_root *st = &cfqd->grp_service_tree;
2603  struct cfq_group *cfqg;
2604 
2605  if (RB_EMPTY_ROOT(&st->rb))
2606  return NULL;
2607  cfqg = cfq_rb_first_group(st);
2608  update_min_vdisktime(st);
2609  return cfqg;
2610 }
2611 
2612 static void cfq_choose_cfqg(struct cfq_data *cfqd)
2613 {
2614  struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);
2615 
2616  cfqd->serving_group = cfqg;
2617 
2618  /* Restore the workload type data */
2619  if (cfqg->saved_workload_slice) {
2620  cfqd->workload_expires = jiffies + cfqg->saved_workload_slice;
2621  cfqd->serving_type = cfqg->saved_workload;
2622  cfqd->serving_prio = cfqg->saved_serving_prio;
2623  } else
2624  cfqd->workload_expires = jiffies - 1;
2625 
2626  choose_service_tree(cfqd, cfqg);
2627 }
2628 
2629 /*
2630  * Select a queue for service. If we have a current active queue,
2631  * check whether to continue servicing it, or retrieve and set a new one.
2632  */
2633 static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
2634 {
2635  struct cfq_queue *cfqq, *new_cfqq = NULL;
2636 
2637  cfqq = cfqd->active_queue;
2638  if (!cfqq)
2639  goto new_queue;
2640 
2641  if (!cfqd->rq_queued)
2642  return NULL;
2643 
2644  /*
2645  * We were waiting for group to get backlogged. Expire the queue
2646  */
2647  if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
2648  goto expire;
2649 
2650  /*
2651  * The active queue has run out of time, expire it and select new.
2652  */
2653  if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
2654  /*
2655  * If slice had not expired at the completion of last request
2656  * we might not have turned on wait_busy flag. Don't expire
2657  * the queue yet. Allow the group to get backlogged.
2658  *
2659  * The very fact that we have used the slice means that we
2660  * have been idling all along on this queue and it should be
2661  * ok to wait for this request to complete.
2662  */
2663  if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
2664  && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
2665  cfqq = NULL;
2666  goto keep_queue;
2667  } else
2668  goto check_group_idle;
2669  }
2670 
2671  /*
2672  * The active queue has requests and isn't expired, allow it to
2673  * dispatch.
2674  */
2675  if (!RB_EMPTY_ROOT(&cfqq->sort_list))
2676  goto keep_queue;
2677 
2678  /*
2679  * If another queue has a request waiting within our mean seek
2680  * distance, let it run. The expire code will check for close
2681  * cooperators and put the close queue at the front of the service
2682  * tree. If possible, merge the expiring queue with the new cfqq.
2683  */
2684  new_cfqq = cfq_close_cooperator(cfqd, cfqq);
2685  if (new_cfqq) {
2686  if (!cfqq->new_cfqq)
2687  cfq_setup_merge(cfqq, new_cfqq);
2688  goto expire;
2689  }
2690 
2691  /*
2692  * No requests pending. If the active queue still has requests in
2693  * flight or is idling for a new request, allow either of these
2694  * conditions to happen (or time out) before selecting a new queue.
2695  */
2696  if (timer_pending(&cfqd->idle_slice_timer)) {
2697  cfqq = NULL;
2698  goto keep_queue;
2699  }
2700 
2701  /*
2702  * This is a deep seek queue, but the device is much faster than
2703  * the queue can keep it busy; don't idle.
2704  */
2705  if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
2706  (cfq_cfqq_slice_new(cfqq) ||
2707  (cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) {
2708  cfq_clear_cfqq_deep(cfqq);
2709  cfq_clear_cfqq_idle_window(cfqq);
2710  }
2711 
2712  if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
2713  cfqq = NULL;
2714  goto keep_queue;
2715  }
2716 
2717  /*
2718  * If group idle is enabled and there are requests dispatched from
2719  * this group, wait for requests to complete.
2720  */
2721 check_group_idle:
2722  if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 &&
2723  cfqq->cfqg->dispatched &&
2724  !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) {
2725  cfqq = NULL;
2726  goto keep_queue;
2727  }
2728 
2729 expire:
2730  cfq_slice_expired(cfqd, 0);
2731 new_queue:
2732  /*
2733  * Current queue expired. Check if we have to switch to a new
2734  * service tree
2735  */
2736  if (!new_cfqq)
2737  cfq_choose_cfqg(cfqd);
2738 
2739  cfqq = cfq_set_active_queue(cfqd, new_cfqq);
2740 keep_queue:
2741  return cfqq;
2742 }
2743 
2744 static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
2745 {
2746  int dispatched = 0;
2747 
2748  while (cfqq->next_rq) {
2749  cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
2750  dispatched++;
2751  }
2752 
2753  BUG_ON(!list_empty(&cfqq->fifo));
2754 
2755  /* By default cfqq is not expired if it is empty. Do it explicitly */
2756  __cfq_slice_expired(cfqq->cfqd, cfqq, 0);
2757  return dispatched;
2758 }
2759 
2760 /*
2761  * Drain our current requests. Used for barriers and when switching
2762  * io schedulers on-the-fly.
2763  */
2764 static int cfq_forced_dispatch(struct cfq_data *cfqd)
2765 {
2766  struct cfq_queue *cfqq;
2767  int dispatched = 0;
2768 
2769  /* Expire the timeslice of the current active queue first */
2770  cfq_slice_expired(cfqd, 0);
2771  while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
2772  __cfq_set_active_queue(cfqd, cfqq);
2773  dispatched += __cfq_forced_dispatch_cfqq(cfqq);
2774  }
2775 
2776  BUG_ON(cfqd->busy_queues);
2777 
2778  cfq_log(cfqd, "forced_dispatch=%d", dispatched);
2779  return dispatched;
2780 }
2781 
2782 static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
2783  struct cfq_queue *cfqq)
2784 {
2785  /* the queue hasn't finished any request, can't estimate */
2786  if (cfq_cfqq_slice_new(cfqq))
2787  return true;
2788  if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
2789  cfqq->slice_end))
2790  return true;
2791 
2792  return false;
2793 }
2794 
2795 static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2796 {
2797  unsigned int max_dispatch;
2798 
2799  /*
2800  * Drain async requests before we start sync IO
2801  */
2802  if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
2803  return false;
2804 
2805  /*
2806  * If this is an async queue and we have sync IO in flight, let it wait
2807  */
2808  if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
2809  return false;
2810 
2811  max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
2812  if (cfq_class_idle(cfqq))
2813  max_dispatch = 1;
2814 
2815  /*
2816  * Does this cfqq already have too much IO in flight?
2817  */
2818  if (cfqq->dispatched >= max_dispatch) {
2819  bool promote_sync = false;
2820  /*
2821  * idle queue must always only have a single IO in flight
2822  */
2823  if (cfq_class_idle(cfqq))
2824  return false;
2825 
2826  /*
2827  * If there is only one sync queue,
2828  * we can ignore the async queues here and give the sync
2829  * queue no dispatch limit. The reason is that a sync queue can
2830  * preempt an async queue, so limiting the sync queue doesn't make
2831  * sense. This is useful for the aiostress test.
2832  */
2833  if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1)
2834  promote_sync = true;
2835 
2836  /*
2837  * We have other queues, don't allow more IO from this one
2838  */
2839  if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
2840  !promote_sync)
2841  return false;
2842 
2843  /*
2844  * Sole queue user, no limit
2845  */
2846  if (cfqd->busy_queues == 1 || promote_sync)
2847  max_dispatch = -1;
2848  else
2849  /*
2850  * Normally we start throttling cfqq when cfq_quantum/2
2851  * requests have been dispatched. But we can drive
2852  * deeper queue depths at the beginning of the slice,
2853  * subject to the upper limit of cfq_quantum.
2854  */
2855  max_dispatch = cfqd->cfq_quantum;
2856  }
2857 
2858  /*
2859  * Async queues must wait a bit before being allowed dispatch.
2860  * We also ramp up the dispatch depth gradually for async IO,
2861  * based on the last sync IO we serviced
2862  */
2863  if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
2864  unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
2865  unsigned int depth;
2866 
2867  depth = last_sync / cfqd->cfq_slice[1];
2868  if (!depth && !cfqq->dispatched)
2869  depth = 1;
2870  if (depth < max_dispatch)
2871  max_dispatch = depth;
2872  }
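 /*
  * Example of the ramp above: if the last delayed sync completion was
  * two sync slices (cfq_slice[1]) ago, the async depth becomes 2; if
  * sync I/O completed very recently, depth is 0 (or 1 when nothing is
  * dispatched yet), throttling async dispatch almost completely.
  */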
2873 
2874  /*
2875  * If we're below the current max, allow a dispatch
2876  */
2877  return cfqq->dispatched < max_dispatch;
2878 }
2879 
2880 /*
2881  * Dispatch a request from cfqq, moving them to the request queue
2882  * dispatch list.
2883  */
2884 static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2885 {
2886  struct request *rq;
2887 
2888  BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
2889 
2890  if (!cfq_may_dispatch(cfqd, cfqq))
2891  return false;
2892 
2893  /*
2894  * follow expired path, else get first next available
2895  */
2896  rq = cfq_check_fifo(cfqq);
2897  if (!rq)
2898  rq = cfqq->next_rq;
2899 
2900  /*
2901  * insert request into driver dispatch list
2902  */
2903  cfq_dispatch_insert(cfqd->queue, rq);
2904 
2905  if (!cfqd->active_cic) {
2906  struct cfq_io_cq *cic = RQ_CIC(rq);
2907 
2908  atomic_long_inc(&cic->icq.ioc->refcount);
2909  cfqd->active_cic = cic;
2910  }
2911 
2912  return true;
2913 }
2914 
2915 /*
2916  * Find the cfqq that we need to service and move a request from that to the
2917  * dispatch list
2918  */
2919 static int cfq_dispatch_requests(struct request_queue *q, int force)
2920 {
2921  struct cfq_data *cfqd = q->elevator->elevator_data;
2922  struct cfq_queue *cfqq;
2923 
2924  if (!cfqd->busy_queues)
2925  return 0;
2926 
2927  if (unlikely(force))
2928  return cfq_forced_dispatch(cfqd);
2929 
2930  cfqq = cfq_select_queue(cfqd);
2931  if (!cfqq)
2932  return 0;
2933 
2934  /*
2935  * Dispatch a request from this cfqq, if it is allowed
2936  */
2937  if (!cfq_dispatch_request(cfqd, cfqq))
2938  return 0;
2939 
2940  cfqq->slice_dispatch++;
2941  cfq_clear_cfqq_must_dispatch(cfqq);
2942 
2943  /*
2944  * expire an async queue immediately if it has used up its slice. an idle
2945  * queue always expires after 1 dispatch round.
2946  */
2947  if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
2948  cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
2949  cfq_class_idle(cfqq))) {
2950  cfqq->slice_end = jiffies + 1;
2951  cfq_slice_expired(cfqd, 0);
2952  }
2953 
2954  cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
2955  return 1;
2956 }
2957 
2958 /*
2959  * task holds one reference to the queue, dropped when task exits. each rq
2960  * in-flight on this queue also holds a reference, dropped when rq is freed.
2961  *
2962  * Each cfq queue took a reference on the parent group. Drop it now.
2963  * queue lock must be held here.
2964  */
2965 static void cfq_put_queue(struct cfq_queue *cfqq)
2966 {
2967  struct cfq_data *cfqd = cfqq->cfqd;
2968  struct cfq_group *cfqg;
2969 
2970  BUG_ON(cfqq->ref <= 0);
2971 
2972  cfqq->ref--;
2973  if (cfqq->ref)
2974  return;
2975 
2976  cfq_log_cfqq(cfqd, cfqq, "put_queue");
2977  BUG_ON(rb_first(&cfqq->sort_list));
2978  BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
2979  cfqg = cfqq->cfqg;
2980 
2981  if (unlikely(cfqd->active_queue == cfqq)) {
2982  __cfq_slice_expired(cfqd, cfqq, 0);
2983  cfq_schedule_dispatch(cfqd);
2984  }
2985 
2986  BUG_ON(cfq_cfqq_on_rr(cfqq));
2987  kmem_cache_free(cfq_pool, cfqq);
2988  cfqg_put(cfqg);
2989 }
2990 
2991 static void cfq_put_cooperator(struct cfq_queue *cfqq)
2992 {
2993  struct cfq_queue *__cfqq, *next;
2994 
2995  /*
2996  * If this queue was scheduled to merge with another queue, be
2997  * sure to drop the reference taken on that queue (and others in
2998  * the merge chain). See cfq_setup_merge and cfq_merge_cfqqs.
2999  */
3000  __cfqq = cfqq->new_cfqq;
3001  while (__cfqq) {
3002  if (__cfqq == cfqq) {
3003  WARN(1, "cfqq->new_cfqq loop detected\n");
3004  break;
3005  }
3006  next = __cfqq->new_cfqq;
3007  cfq_put_queue(__cfqq);
3008  __cfqq = next;
3009  }
3010 }
3011 
3012 static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3013 {
3014  if (unlikely(cfqq == cfqd->active_queue)) {
3015  __cfq_slice_expired(cfqd, cfqq, 0);
3016  cfq_schedule_dispatch(cfqd);
3017  }
3018 
3019  cfq_put_cooperator(cfqq);
3020 
3021  cfq_put_queue(cfqq);
3022 }
3023 
3024 static void cfq_init_icq(struct io_cq *icq)
3025 {
3026  struct cfq_io_cq *cic = icq_to_cic(icq);
3027 
3028  cic->ttime.last_end_request = jiffies;
3029 }
3030 
3031 static void cfq_exit_icq(struct io_cq *icq)
3032 {
3033  struct cfq_io_cq *cic = icq_to_cic(icq);
3034  struct cfq_data *cfqd = cic_to_cfqd(cic);
3035 
3036  if (cic->cfqq[BLK_RW_ASYNC]) {
3037  cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
3038  cic->cfqq[BLK_RW_ASYNC] = NULL;
3039  }
3040 
3041  if (cic->cfqq[BLK_RW_SYNC]) {
3042  cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
3043  cic->cfqq[BLK_RW_SYNC] = NULL;
3044  }
3045 }
3046 
3047 static void cfq_init_prio_data(struct cfq_queue *cfqq, struct cfq_io_cq *cic)
3048 {
3049  struct task_struct *tsk = current;
3050  int ioprio_class;
3051 
3052  if (!cfq_cfqq_prio_changed(cfqq))
3053  return;
3054 
3055  ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
3056  switch (ioprio_class) {
3057  default:
3058  printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
3059  case IOPRIO_CLASS_NONE:
3060  /*
3061  * no prio set, inherit CPU scheduling settings
3062  */
3063  cfqq->ioprio = task_nice_ioprio(tsk);
3064  cfqq->ioprio_class = task_nice_ioclass(tsk);
3065  break;
3066  case IOPRIO_CLASS_RT:
3067  cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
3068  cfqq->ioprio_class = IOPRIO_CLASS_RT;
3069  break;
3070  case IOPRIO_CLASS_BE:
3071  cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
3072  cfqq->ioprio_class = IOPRIO_CLASS_BE;
3073  break;
3074  case IOPRIO_CLASS_IDLE:
3075  cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
3076  cfqq->ioprio = 7;
3077  cfq_clear_cfqq_idle_window(cfqq);
3078  break;
3079  }
3080 
3081  /*
3082  * keep track of original prio settings in case we have to temporarily
3083  * elevate the priority of this queue
3084  */
3085  cfqq->org_ioprio = cfqq->ioprio;
3086  cfq_clear_cfqq_prio_changed(cfqq);
3087 }
3088 
3089 static void check_ioprio_changed(struct cfq_io_cq *cic, struct bio *bio)
3090 {
3091  int ioprio = cic->icq.ioc->ioprio;
3092  struct cfq_data *cfqd = cic_to_cfqd(cic);
3093  struct cfq_queue *cfqq;
3094 
3095  /*
3096  * Check whether ioprio has changed. The condition may trigger
3097  * spuriously on a newly created cic but there's no harm.
3098  */
3099  if (unlikely(!cfqd) || likely(cic->ioprio == ioprio))
3100  return;
3101 
3102  cfqq = cic->cfqq[BLK_RW_ASYNC];
3103  if (cfqq) {
3104  struct cfq_queue *new_cfqq;
3105  new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio,
3106  GFP_ATOMIC);
3107  if (new_cfqq) {
3108  cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
3109  cfq_put_queue(cfqq);
3110  }
3111  }
3112 
3113  cfqq = cic->cfqq[BLK_RW_SYNC];
3114  if (cfqq)
3115  cfq_mark_cfqq_prio_changed(cfqq);
3116 
3117  cic->ioprio = ioprio;
3118 }
3119 
3120 static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3121  pid_t pid, bool is_sync)
3122 {
3123  RB_CLEAR_NODE(&cfqq->rb_node);
3124  RB_CLEAR_NODE(&cfqq->p_node);
3125  INIT_LIST_HEAD(&cfqq->fifo);
3126 
3127  cfqq->ref = 0;
3128  cfqq->cfqd = cfqd;
3129 
3130  cfq_mark_cfqq_prio_changed(cfqq);
3131 
3132  if (is_sync) {
3133  if (!cfq_class_idle(cfqq))
3134  cfq_mark_cfqq_idle_window(cfqq);
3135  cfq_mark_cfqq_sync(cfqq);
3136  }
3137  cfqq->pid = pid;
3138 }
3139 
3140 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3141 static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
3142 {
3143  struct cfq_data *cfqd = cic_to_cfqd(cic);
3144  struct cfq_queue *sync_cfqq;
3145  uint64_t id;
3146 
3147  rcu_read_lock();
3148  id = bio_blkcg(bio)->id;
3149  rcu_read_unlock();
3150 
3151  /*
3152  * Check whether blkcg has changed. The condition may trigger
3153  * spuriously on a newly created cic but there's no harm.
3154  */
3155  if (unlikely(!cfqd) || likely(cic->blkcg_id == id))
3156  return;
3157 
3158  sync_cfqq = cic_to_cfqq(cic, 1);
3159  if (sync_cfqq) {
3160  /*
3161  * Drop reference to sync queue. A new sync queue will be
3162  * assigned in new group upon arrival of a fresh request.
3163  */
3164  cfq_log_cfqq(cfqd, sync_cfqq, "changed cgroup");
3165  cic_set_cfqq(cic, NULL, 1);
3166  cfq_put_queue(sync_cfqq);
3167  }
3168 
3169  cic->blkcg_id = id;
3170 }
3171 #else
3172 static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) { }
3173 #endif /* CONFIG_CFQ_GROUP_IOSCHED */
3174 
3175 static struct cfq_queue *
3176 cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
3177  struct bio *bio, gfp_t gfp_mask)
3178 {
3179  struct blkcg *blkcg;
3180  struct cfq_queue *cfqq, *new_cfqq = NULL;
3181  struct cfq_group *cfqg;
3182 
3183 retry:
3184  rcu_read_lock();
3185 
3186  blkcg = bio_blkcg(bio);
3187  cfqg = cfq_lookup_create_cfqg(cfqd, blkcg);
3188  cfqq = cic_to_cfqq(cic, is_sync);
3189 
3190  /*
3191  * Always try a new alloc if we fell back to the OOM cfqq
3192  * originally, since it should just be a temporary situation.
3193  */
3194  if (!cfqq || cfqq == &cfqd->oom_cfqq) {
3195  cfqq = NULL;
3196  if (new_cfqq) {
3197  cfqq = new_cfqq;
3198  new_cfqq = NULL;
3199  } else if (gfp_mask & __GFP_WAIT) {
3200  rcu_read_unlock();
3201  spin_unlock_irq(cfqd->queue->queue_lock);
3202  new_cfqq = kmem_cache_alloc_node(cfq_pool,
3203  gfp_mask | __GFP_ZERO,
3204  cfqd->queue->node);
3205  spin_lock_irq(cfqd->queue->queue_lock);
3206  if (new_cfqq)
3207  goto retry;
3208  } else {
3209  cfqq = kmem_cache_alloc_node(cfq_pool,
3210  gfp_mask | __GFP_ZERO,
3211  cfqd->queue->node);
3212  }
3213 
3214  if (cfqq) {
3215  cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
3216  cfq_init_prio_data(cfqq, cic);
3217  cfq_link_cfqq_cfqg(cfqq, cfqg);
3218  cfq_log_cfqq(cfqd, cfqq, "alloced");
3219  } else
3220  cfqq = &cfqd->oom_cfqq;
3221  }
3222 
3223  if (new_cfqq)
3224  kmem_cache_free(cfq_pool, new_cfqq);
3225 
3226  rcu_read_unlock();
3227  return cfqq;
3228 }
3229 
3230 static struct cfq_queue **
3231 cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
3232 {
3233  switch (ioprio_class) {
3234  case IOPRIO_CLASS_RT:
3235  return &cfqd->async_cfqq[0][ioprio];
3236  case IOPRIO_CLASS_NONE:
3237  ioprio = IOPRIO_NORM;
3238  /* fall through */
3239  case IOPRIO_CLASS_BE:
3240  return &cfqd->async_cfqq[1][ioprio];
3241  case IOPRIO_CLASS_IDLE:
3242  return &cfqd->async_idle_cfqq;
3243  default:
3244  BUG();
3245  }
3246 }
3247 
3248 static struct cfq_queue *
3249 cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
3250  struct bio *bio, gfp_t gfp_mask)
3251 {
3252  const int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
3253  const int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
3254  struct cfq_queue **async_cfqq = NULL;
3255  struct cfq_queue *cfqq = NULL;
3256 
3257  if (!is_sync) {
3258  async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
3259  cfqq = *async_cfqq;
3260  }
3261 
3262  if (!cfqq)
3263  cfqq = cfq_find_alloc_queue(cfqd, is_sync, cic, bio, gfp_mask);
3264 
3265  /*
3266  * pin the queue now that it's allocated, scheduler exit will prune it
3267  */
3268  if (!is_sync && !(*async_cfqq)) {
3269  cfqq->ref++;
3270  *async_cfqq = cfqq;
3271  }
3272 
3273  cfqq->ref++;
3274  return cfqq;
3275 }
3276 
3277 static void
3278 __cfq_update_io_thinktime(struct cfq_ttime *ttime, unsigned long slice_idle)
3279 {
3280  unsigned long elapsed = jiffies - ttime->last_end_request;
3281  elapsed = min(elapsed, 2UL * slice_idle);
3282 
3283  ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8;
3284  ttime->ttime_total = (7*ttime->ttime_total + 256*elapsed) / 8;
3285  ttime->ttime_mean = (ttime->ttime_total + 128) / ttime->ttime_samples;
3286 }
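/*
 * The update above is a fixed-point exponentially weighted average with
 * a 7/8 decay: ttime_samples converges towards 256 (the fixed-point
 * weight), ttime_total accumulates 256 * elapsed with the same decay,
 * and ttime_mean is the decayed mean think time (the +128 rounds to the
 * nearest jiffy). sample_valid() (> 80 samples) therefore only trusts
 * the mean after roughly three updates.
 */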
3287 
3288 static void
3289 cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3290  struct cfq_io_cq *cic)
3291 {
3292  if (cfq_cfqq_sync(cfqq)) {
3293  __cfq_update_io_thinktime(&cic->ttime, cfqd->cfq_slice_idle);
3294  __cfq_update_io_thinktime(&cfqq->service_tree->ttime,
3295  cfqd->cfq_slice_idle);
3296  }
3297 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3298  __cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle);
3299 #endif
3300 }
3301 
3302 static void
3303 cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3304  struct request *rq)
3305 {
3306  sector_t sdist = 0;
3307  sector_t n_sec = blk_rq_sectors(rq);
3308  if (cfqq->last_request_pos) {
3309  if (cfqq->last_request_pos < blk_rq_pos(rq))
3310  sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
3311  else
3312  sdist = cfqq->last_request_pos - blk_rq_pos(rq);
3313  }
3314 
3315  cfqq->seek_history <<= 1;
3316  if (blk_queue_nonrot(cfqd->queue))
3317  cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
3318  else
3319  cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
3320 }
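/*
 * seek_history is a 32-bit shift register over the last 32 requests: a
 * bit is set when a request looks seeky (seek distance above CFQQ_SEEK_THR
 * on rotational media, or a small request below CFQQ_SECT_THR_NONROT on
 * non-rotational media). CFQQ_SEEKY() flags the queue once more than
 * 32/8 = 4 of those bits are set.
 */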
3321 
3322 /*
3323  * Disable idle window if the process thinks too long or seeks so much that
3324  * it doesn't matter
3325  */
3326 static void
3327 cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3328  struct cfq_io_cq *cic)
3329 {
3330  int old_idle, enable_idle;
3331 
3332  /*
3333  * Don't idle for async or idle io prio class
3334  */
3335  if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
3336  return;
3337 
3338  enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
3339 
3340  if (cfqq->queued[0] + cfqq->queued[1] >= 4)
3341  cfq_mark_cfqq_deep(cfqq);
3342 
3343  if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
3344  enable_idle = 0;
3345  else if (!atomic_read(&cic->icq.ioc->active_ref) ||
3346  !cfqd->cfq_slice_idle ||
3347  (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
3348  enable_idle = 0;
3349  else if (sample_valid(cic->ttime.ttime_samples)) {
3350  if (cic->ttime.ttime_mean > cfqd->cfq_slice_idle)
3351  enable_idle = 0;
3352  else
3353  enable_idle = 1;
3354  }
3355 
3356  if (old_idle != enable_idle) {
3357  cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
3358  if (enable_idle)
3359  cfq_mark_cfqq_idle_window(cfqq);
3360  else
3361  cfq_clear_cfqq_idle_window(cfqq);
3362  }
3363 }
3364 
3365 /*
3366  * Check if new_cfqq should preempt the currently active queue. Return false
3367  * for no (or if we aren't sure); returning true will cause a preempt.
3368  */
3369 static bool
3370 cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
3371  struct request *rq)
3372 {
3373  struct cfq_queue *cfqq;
3374 
3375  cfqq = cfqd->active_queue;
3376  if (!cfqq)
3377  return false;
3378 
3379  if (cfq_class_idle(new_cfqq))
3380  return false;
3381 
3382  if (cfq_class_idle(cfqq))
3383  return true;
3384 
3385  /*
3386  * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
3387  */
3388  if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
3389  return false;
3390 
3391  /*
3392  * if the new request is sync, but the currently running queue is
3393  * not, let the sync request have priority.
3394  */
3395  if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
3396  return true;
3397 
3398  if (new_cfqq->cfqg != cfqq->cfqg)
3399  return false;
3400 
3401  if (cfq_slice_used(cfqq))
3402  return true;
3403 
3404  /* Allow preemption only if we are idling on sync-noidle tree */
3405  if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD &&
3406  cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
3407  new_cfqq->service_tree->count == 2 &&
3408  RB_EMPTY_ROOT(&cfqq->sort_list))
3409  return true;
3410 
3411  /*
3412  * So both queues are sync. Let the new request get disk time if
3413  * it's a metadata request and the current queue is doing regular IO.
3414  */
3415  if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending)
3416  return true;
3417 
3418  /*
3419  * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
3420  */
3421  if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
3422  return true;
3423 
3424  /* An idle queue should not be idle now for some reason */
3425  if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
3426  return true;
3427 
3428  if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
3429  return false;
3430 
3431  /*
3432  * if this request is as-good as one we would expect from the
3433  * current cfqq, let it preempt
3434  */
3435  if (cfq_rq_close(cfqd, cfqq, rq))
3436  return true;
3437 
3438  return false;
3439 }
3440 
3441 /*
3442  * cfqq preempts the active queue. if we allowed preempt with no slice left,
3443  * let it have half of its nominal slice.
3444  */
3445 static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3446 {
3447  enum wl_type_t old_type = cfqq_type(cfqd->active_queue);
3448 
3449  cfq_log_cfqq(cfqd, cfqq, "preempt");
3450  cfq_slice_expired(cfqd, 1);
3451 
3452  /*
3453  * workload type is changed, don't save slice, otherwise preempt
3454  * doesn't happen
3455  */
3456  if (old_type != cfqq_type(cfqq))
3457  cfqq->cfqg->saved_workload_slice = 0;
3458 
3459  /*
3460  * Put the new queue at the front of the current list,
3461  * so we know that it will be selected next.
3462  */
3463  BUG_ON(!cfq_cfqq_on_rr(cfqq));
3464 
3465  cfq_service_tree_add(cfqd, cfqq, 1);
3466 
3467  cfqq->slice_end = 0;
3468  cfq_mark_cfqq_slice_new(cfqq);
3469 }
3470 
3471 /*
3472  * Called when a new fs request (rq) is added (to cfqq). Check if there's
3473  * something we should do about it
3474  */
3475 static void
3476 cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3477  struct request *rq)
3478 {
3479  struct cfq_io_cq *cic = RQ_CIC(rq);
3480 
3481  cfqd->rq_queued++;
3482  if (rq->cmd_flags & REQ_PRIO)
3483  cfqq->prio_pending++;
3484 
3485  cfq_update_io_thinktime(cfqd, cfqq, cic);
3486  cfq_update_io_seektime(cfqd, cfqq, rq);
3487  cfq_update_idle_window(cfqd, cfqq, cic);
3488 
3489  cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
3490 
3491  if (cfqq == cfqd->active_queue) {
3492  /*
3493  * Remember that we saw a request from this process, but
3494  * don't start queuing just yet. Otherwise we risk seeing lots
3495  * of tiny requests, because we disrupt the normal plugging
3496  * and merging. If the request is already larger than a single
3497  * page, let it rip immediately. For that case we assume that
3498  * merging is already done. Ditto for a busy system that
3499  * has other work pending, don't risk delaying work until the
3500  * idle timer unplug runs.
3501  */
3502  if (cfq_cfqq_wait_request(cfqq)) {
3503  if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
3504  cfqd->busy_queues > 1) {
3505  cfq_del_timer(cfqd, cfqq);
3506  cfq_clear_cfqq_wait_request(cfqq);
3507  __blk_run_queue(cfqd->queue);
3508  } else {
3509  cfqg_stats_update_idle_time(cfqq->cfqg);
3510  cfq_mark_cfqq_must_dispatch(cfqq);
3511  }
3512  }
3513  } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
3514  /*
3515  * not the active queue - expire current slice if it is
3516  * idle and has expired its mean thinktime, or this new queue
3517  * has some old slice time left and is of higher priority or
3518  * this new queue is RT and the current one is BE
3519  */
3520  cfq_preempt_queue(cfqd, cfqq);
3521  __blk_run_queue(cfqd->queue);
3522  }
3523 }
3524 
3525 static void cfq_insert_request(struct request_queue *q, struct request *rq)
3526 {
3527  struct cfq_data *cfqd = q->elevator->elevator_data;
3528  struct cfq_queue *cfqq = RQ_CFQQ(rq);
3529 
3530  cfq_log_cfqq(cfqd, cfqq, "insert_request");
3531  cfq_init_prio_data(cfqq, RQ_CIC(rq));
3532 
3533  rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
3534  list_add_tail(&rq->queuelist, &cfqq->fifo);
3535  cfq_add_rq_rb(rq);
3536  cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group,
3537  rq->cmd_flags);
3538  cfq_rq_enqueued(cfqd, cfqq, rq);
3539 }
3540 
3541 /*
3542  * Update hw_tag based on peak queue depth over 50 samples under
3543  * sufficient load.
3544  */
3545 static void cfq_update_hw_tag(struct cfq_data *cfqd)
3546 {
3547  struct cfq_queue *cfqq = cfqd->active_queue;
3548 
3549  if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
3550  cfqd->hw_tag_est_depth = cfqd->rq_in_driver;
3551 
3552  if (cfqd->hw_tag == 1)
3553  return;
3554 
3555  if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
3556  cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
3557  return;
3558 
3559  /*
3560  * If the active queue doesn't have enough requests and can idle, cfq might not
3561  * dispatch sufficient requests to hardware. Don't zero hw_tag in this
3562  * case
3563  */
3564  if (cfqq && cfq_cfqq_idle_window(cfqq) &&
3565  cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
3566  CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
3567  return;
3568 
3569  if (cfqd->hw_tag_samples++ < 50)
3570  return;
3571 
3572  if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
3573  cfqd->hw_tag = 1;
3574  else
3575  cfqd->hw_tag = 0;
3576 }
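/*
 * hw_tag is decided after roughly 50 samples taken under load: it is set
 * when the peak observed driver depth reached CFQ_HW_QUEUE_MIN, i.e. the
 * device appears to do its own internal queueing. Once set to 1 it is not
 * re-evaluated, and together with blk_queue_nonrot() it disables idling
 * (see cfq_should_idle() and cfq_arm_slice_timer()).
 */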
3577 
3578 static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3579 {
3580  struct cfq_io_cq *cic = cfqd->active_cic;
3581 
3582  /* If the queue already has requests, don't wait */
3583  if (!RB_EMPTY_ROOT(&cfqq->sort_list))
3584  return false;
3585 
3586  /* If there are other queues in the group, don't wait */
3587  if (cfqq->cfqg->nr_cfqq > 1)
3588  return false;
3589 
3590  /* the only queue in the group, but think time is big */
3591  if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true))
3592  return false;
3593 
3594  if (cfq_slice_used(cfqq))
3595  return true;
3596 
3597  /* if slice left is less than think time, wait busy */
3598  if (cic && sample_valid(cic->ttime.ttime_samples)
3599  && (cfqq->slice_end - jiffies < cic->ttime.ttime_mean))
3600  return true;
3601 
3602  /*
3603  * If the think time is less than a jiffy, then ttime_mean=0 and the above
3604  * will not be true. It might happen that the slice has not expired yet
3605  * but will expire soon (4-5 ns) during select_queue(). To cover the
3606  * case where think time is less than a jiffy, mark the queue wait
3607  * busy if only 1 jiffy is left in the slice.
3608  */
3609  if (cfqq->slice_end - jiffies == 1)
3610  return true;
3611 
3612  return false;
3613 }
3614 
3615 static void cfq_completed_request(struct request_queue *q, struct request *rq)
3616 {
3617  struct cfq_queue *cfqq = RQ_CFQQ(rq);
3618  struct cfq_data *cfqd = cfqq->cfqd;
3619  const int sync = rq_is_sync(rq);
3620  unsigned long now;
3621 
3622  now = jiffies;
3623  cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
3624  !!(rq->cmd_flags & REQ_NOIDLE));
3625 
3626  cfq_update_hw_tag(cfqd);
3627 
3628  WARN_ON(!cfqd->rq_in_driver);
3629  WARN_ON(!cfqq->dispatched);
3630  cfqd->rq_in_driver--;
3631  cfqq->dispatched--;
3632  (RQ_CFQG(rq))->dispatched--;
3633  cfqg_stats_update_completion(cfqq->cfqg, rq_start_time_ns(rq),
3634  rq_io_start_time_ns(rq), rq->cmd_flags);
3635 
3636  cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
3637 
3638  if (sync) {
3639  struct cfq_rb_root *service_tree;
3640 
3641  RQ_CIC(rq)->ttime.last_end_request = now;
3642 
3643  if (cfq_cfqq_on_rr(cfqq))
3644  service_tree = cfqq->service_tree;
3645  else
3646  service_tree = service_tree_for(cfqq->cfqg,
3647  cfqq_prio(cfqq), cfqq_type(cfqq));
3648  service_tree->ttime.last_end_request = now;
3649  if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
3650  cfqd->last_delayed_sync = now;
3651  }
3652 
3653 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3654  cfqq->cfqg->ttime.last_end_request = now;
3655 #endif
3656 
3657  /*
3658  * If this is the active queue, check if it needs to be expired,
3659  * or if we want to idle in case it has no pending requests.
3660  */
3661  if (cfqd->active_queue == cfqq) {
3662  const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
3663 
3664  if (cfq_cfqq_slice_new(cfqq)) {
3665  cfq_set_prio_slice(cfqd, cfqq);
3666  cfq_clear_cfqq_slice_new(cfqq);
3667  }
3668 
3669  /*
3670  * Should we wait for the next request to come in before we expire
3671  * the queue?
3672  */
3673  if (cfq_should_wait_busy(cfqd, cfqq)) {
3674  unsigned long extend_sl = cfqd->cfq_slice_idle;
3675  if (!cfqd->cfq_slice_idle)
3676  extend_sl = cfqd->cfq_group_idle;
3677  cfqq->slice_end = jiffies + extend_sl;
3678  cfq_mark_cfqq_wait_busy(cfqq);
3679  cfq_log_cfqq(cfqd, cfqq, "will busy wait");
3680  }
3681 
3682  /*
3683  * Idling is not enabled on:
3684  * - expired queues
3685  * - idle-priority queues
3686  * - async queues
3687  * - queues with still some requests queued
3688  * - when there is a close cooperator
3689  */
3690  if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
3691  cfq_slice_expired(cfqd, 1);
3692  else if (sync && cfqq_empty &&
3693  !cfq_close_cooperator(cfqd, cfqq)) {
3694  cfq_arm_slice_timer(cfqd);
3695  }
3696  }
3697 
3698  if (!cfqd->rq_in_driver)
3699  cfq_schedule_dispatch(cfqd);
3700 }
3701 
3702 static inline int __cfq_may_queue(struct cfq_queue *cfqq)
3703 {
3704  if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
3705  cfq_mark_cfqq_must_alloc_slice(cfqq);
3706  return ELV_MQUEUE_MUST;
3707  }
3708 
3709  return ELV_MQUEUE_MAY;
3710 }
3711 
3712 static int cfq_may_queue(struct request_queue *q, int rw)
3713 {
3714  struct cfq_data *cfqd = q->elevator->elevator_data;
3715  struct task_struct *tsk = current;
3716  struct cfq_io_cq *cic;
3717  struct cfq_queue *cfqq;
3718 
3719  /*
3720  * don't force setup of a queue from here, as a call to may_queue
3721  * does not necessarily imply that a request actually will be queued.
3722  * so just lookup a possibly existing queue, or return 'may queue'
3723  * if that fails
3724  */
3725  cic = cfq_cic_lookup(cfqd, tsk->io_context);
3726  if (!cic)
3727  return ELV_MQUEUE_MAY;
3728 
3729  cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
3730  if (cfqq) {
3731  cfq_init_prio_data(cfqq, cic);
3732 
3733  return __cfq_may_queue(cfqq);
3734  }
3735 
3736  return ELV_MQUEUE_MAY;
3737 }
3738 
3739 /*
3740  * queue lock held here
3741  */
3742 static void cfq_put_request(struct request *rq)
3743 {
3744  struct cfq_queue *cfqq = RQ_CFQQ(rq);
3745 
3746  if (cfqq) {
3747  const int rw = rq_data_dir(rq);
3748 
3749  BUG_ON(!cfqq->allocated[rw]);
3750  cfqq->allocated[rw]--;
3751 
3752  /* Put down rq reference on cfqg */
3753  cfqg_put(RQ_CFQG(rq));
3754  rq->elv.priv[0] = NULL;
3755  rq->elv.priv[1] = NULL;
3756 
3757  cfq_put_queue(cfqq);
3758  }
3759 }
3760 
3761 static struct cfq_queue *
3762 cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_cq *cic,
3763  struct cfq_queue *cfqq)
3764 {
3765  cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
3766  cic_set_cfqq(cic, cfqq->new_cfqq, 1);
3767  cfq_mark_cfqq_coop(cfqq->new_cfqq);
3768  cfq_put_queue(cfqq);
3769  return cic_to_cfqq(cic, 1);
3770 }
3771 
3772 /*
3773  * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
3774  * was the last process referring to said cfqq.
3775  */
3776 static struct cfq_queue *
3777 split_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq)
3778 {
3779  if (cfqq_process_refs(cfqq) == 1) {
3780  cfqq->pid = current->pid;
3781  cfq_clear_cfqq_coop(cfqq);
3782  cfq_clear_cfqq_split_coop(cfqq);
3783  return cfqq;
3784  }
3785 
3786  cic_set_cfqq(cic, NULL, 1);
3787 
3788  cfq_put_cooperator(cfqq);
3789 
3790  cfq_put_queue(cfqq);
3791  return NULL;
3792 }
3793 /*
3794  * Allocate cfq data structures associated with this request.
3795  */
3796 static int
3797 cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio,
3798  gfp_t gfp_mask)
3799 {
3800  struct cfq_data *cfqd = q->elevator->elevator_data;
3801  struct cfq_io_cq *cic = icq_to_cic(rq->elv.icq);
3802  const int rw = rq_data_dir(rq);
3803  const bool is_sync = rq_is_sync(rq);
3804  struct cfq_queue *cfqq;
3805 
3806  might_sleep_if(gfp_mask & __GFP_WAIT);
3807 
3808  spin_lock_irq(q->queue_lock);
3809 
3810  check_ioprio_changed(cic, bio);
3811  check_blkcg_changed(cic, bio);
3812 new_queue:
3813  cfqq = cic_to_cfqq(cic, is_sync);
3814  if (!cfqq || cfqq == &cfqd->oom_cfqq) {
3815  cfqq = cfq_get_queue(cfqd, is_sync, cic, bio, gfp_mask);
3816  cic_set_cfqq(cic, cfqq, is_sync);
3817  } else {
3818  /*
3819  * If the queue was seeky for too long, break it apart.
3820  */
3821  if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
3822  cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
3823  cfqq = split_cfqq(cic, cfqq);
3824  if (!cfqq)
3825  goto new_queue;
3826  }
3827 
3828  /*
3829  * Check to see if this queue is scheduled to merge with
3830  * another, closely cooperating queue. The merging of
3831  * queues happens here as it must be done in process context.
3832  * The reference on new_cfqq was taken in merge_cfqqs.
3833  */
3834  if (cfqq->new_cfqq)
3835  cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
3836  }
3837 
3838  cfqq->allocated[rw]++;
3839 
3840  cfqq->ref++;
3841  cfqg_get(cfqq->cfqg);
3842  rq->elv.priv[0] = cfqq;
3843  rq->elv.priv[1] = cfqq->cfqg;
3844  spin_unlock_irq(q->queue_lock);
3845  return 0;
3846 }
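
(Aside, not part of the source: together with cfq_put_request() above, this forms a strict pairing. Every request that passes through cfq_set_request() pins its cfq_queue via cfqq->ref and cfqq->allocated[rw] and its cfq_group via cfqg_get(), and cfq_put_request() drops exactly those references when the request is freed. A compressed illustration of that lifetime, with the dispatch and completion machinery elided:)

    /* Illustration of the reference pairing; not the real block-layer call flow. */
    static void request_lifetime_sketch(struct request_queue *q,
                                        struct request *rq, struct bio *bio)
    {
        if (cfq_set_request(q, rq, bio, GFP_NOIO) == 0) {
            /* ... request is queued, dispatched and completed ... */
            cfq_put_request(rq);   /* undoes the ref, allocated[rw] and cfqg_get() */
        }
    }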
3847 
3848 static void cfq_kick_queue(struct work_struct *work)
3849 {
3850  struct cfq_data *cfqd =
3851  container_of(work, struct cfq_data, unplug_work);
3852  struct request_queue *q = cfqd->queue;
3853 
3854  spin_lock_irq(q->queue_lock);
3855  __blk_run_queue(cfqd->queue);
3856  spin_unlock_irq(q->queue_lock);
3857 }
3858 
3859 /*
3860  * Timer running if the active_queue is currently idling inside its time slice
3861  */
3862 static void cfq_idle_slice_timer(unsigned long data)
3863 {
3864  struct cfq_data *cfqd = (struct cfq_data *) data;
3865  struct cfq_queue *cfqq;
3866  unsigned long flags;
3867  int timed_out = 1;
3868 
3869  cfq_log(cfqd, "idle timer fired");
3870 
3871  spin_lock_irqsave(cfqd->queue->queue_lock, flags);
3872 
3873  cfqq = cfqd->active_queue;
3874  if (cfqq) {
3875  timed_out = 0;
3876 
3877  /*
3878  * We saw a request before the queue expired, let it through
3879  */
3880  if (cfq_cfqq_must_dispatch(cfqq))
3881  goto out_kick;
3882 
3883  /*
3884  * expired
3885  */
3886  if (cfq_slice_used(cfqq))
3887  goto expire;
3888 
3889  /*
3890  * only expire and reinvoke the request handler if there are
3891  * other queues with pending requests
3892  */
3893  if (!cfqd->busy_queues)
3894  goto out_cont;
3895 
3896  /*
3897  * not expired and it has a request pending, let it dispatch
3898  */
3899  if (!RB_EMPTY_ROOT(&cfqq->sort_list))
3900  goto out_kick;
3901 
3902  /*
3903  * Queue depth flag is reset only when idling did not succeed
3904  */
3905  cfq_clear_cfqq_deep(cfqq);
3906  }
3907 expire:
3908  cfq_slice_expired(cfqd, timed_out);
3909 out_kick:
3910  cfq_schedule_dispatch(cfqd);
3911 out_cont:
3912  spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
3913 }
3914 
3915 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
3916 {
3917  del_timer_sync(&cfqd->idle_slice_timer);
3918  cancel_work_sync(&cfqd->unplug_work);
3919 }
3920 
3921 static void cfq_put_async_queues(struct cfq_data *cfqd)
3922 {
3923  int i;
3924 
3925  for (i = 0; i < IOPRIO_BE_NR; i++) {
3926  if (cfqd->async_cfqq[0][i])
3927  cfq_put_queue(cfqd->async_cfqq[0][i]);
3928  if (cfqd->async_cfqq[1][i])
3929  cfq_put_queue(cfqd->async_cfqq[1][i]);
3930  }
3931 
3932  if (cfqd->async_idle_cfqq)
3933  cfq_put_queue(cfqd->async_idle_cfqq);
3934 }
3935 
3936 static void cfq_exit_queue(struct elevator_queue *e)
3937 {
3938  struct cfq_data *cfqd = e->elevator_data;
3939  struct request_queue *q = cfqd->queue;
3940 
3941  cfq_shutdown_timer_wq(cfqd);
3942 
3943  spin_lock_irq(q->queue_lock);
3944 
3945  if (cfqd->active_queue)
3946  __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
3947 
3948  cfq_put_async_queues(cfqd);
3949 
3950  spin_unlock_irq(q->queue_lock);
3951 
3952  cfq_shutdown_timer_wq(cfqd);
3953 
3954 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3955  blkcg_deactivate_policy(q, &blkcg_policy_cfq);
3956 #else
3957  kfree(cfqd->root_group);
3958 #endif
3959  kfree(cfqd);
3960 }
3961 
3962 static int cfq_init_queue(struct request_queue *q)
3963 {
3964  struct cfq_data *cfqd;
3965  struct blkcg_gq *blkg __maybe_unused;
3966  int i, ret;
3967 
3968  cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
3969  if (!cfqd)
3970  return -ENOMEM;
3971 
3972  cfqd->queue = q;
3973  q->elevator->elevator_data = cfqd;
3974 
3975  /* Init root service tree */
3976  cfqd->grp_service_tree = CFQ_RB_ROOT;
3977 
3978  /* Init root group and prefer root group over other groups by default */
3979 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3980  ret = blkcg_activate_policy(q, &blkcg_policy_cfq);
3981  if (ret)
3982  goto out_free;
3983 
3984  cfqd->root_group = blkg_to_cfqg(q->root_blkg);
3985 #else
3986  ret = -ENOMEM;
3987  cfqd->root_group = kzalloc_node(sizeof(*cfqd->root_group),
3988  GFP_KERNEL, cfqd->queue->node);
3989  if (!cfqd->root_group)
3990  goto out_free;
3991 
3992  cfq_init_cfqg_base(cfqd->root_group);
3993 #endif
3994  cfqd->root_group->weight = 2 * CFQ_WEIGHT_DEFAULT;
3995 
3996  /*
3997  * Not strictly needed (since RB_ROOT just clears the node and we
3998  * zeroed cfqd on alloc), but better be safe in case someone decides
3999  * to add magic to the rb code
4000  */
4001  for (i = 0; i < CFQ_PRIO_LISTS; i++)
4002  cfqd->prio_trees[i] = RB_ROOT;
4003 
4004  /*
4005  * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
4006  * Grab a permanent reference to it, so that the normal code flow
4007  * will not attempt to free it. oom_cfqq is linked to root_group
4008  * but shouldn't hold a reference as it'll never be unlinked. Lose
4009  * the reference from linking right away.
4010  */
4011  cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
4012  cfqd->oom_cfqq.ref++;
4013 
4014  spin_lock_irq(q->queue_lock);
4015  cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, cfqd->root_group);
4016  cfqg_put(cfqd->root_group);
4017  spin_unlock_irq(q->queue_lock);
4018 
4019  init_timer(&cfqd->idle_slice_timer);
4020  cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
4021  cfqd->idle_slice_timer.data = (unsigned long) cfqd;
4022 
4023  INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
4024 
4025  cfqd->cfq_quantum = cfq_quantum;
4026  cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
4027  cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
4028  cfqd->cfq_back_max = cfq_back_max;
4029  cfqd->cfq_back_penalty = cfq_back_penalty;
4030  cfqd->cfq_slice[0] = cfq_slice_async;
4031  cfqd->cfq_slice[1] = cfq_slice_sync;
4032  cfqd->cfq_target_latency = cfq_target_latency;
4033  cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
4034  cfqd->cfq_slice_idle = cfq_slice_idle;
4035  cfqd->cfq_group_idle = cfq_group_idle;
4036  cfqd->cfq_latency = 1;
4037  cfqd->hw_tag = -1;
4038  /*
4039  * we optimistically start assuming sync ops weren't delayed in the last
4040  * second, in order to have a larger depth for async operations.
4041  */
4042  cfqd->last_delayed_sync = jiffies - HZ;
4043  return 0;
4044 
4045 out_free:
4046  kfree(cfqd);
4047  return ret;
4048 }
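
(A worked example, not part of the source: with the tunables set above and HZ=1000, cfq_slice_idle = HZ/125 = 8 jiffies = 8 ms and cfq_slice_sync = HZ/10 = 100 ms. With HZ=100, integer division gives cfq_slice_idle = 100/125 = 0, which is exactly the case cfq_init() below guards against by bumping zero values up to 1 jiffy. The tiny userspace helper below only prints those divisions; it makes no kernel calls.)

    #include <stdio.h>

    /* Print the raw HZ-based CFQ defaults for a few common HZ settings. */
    static void show_defaults(int hz)
    {
        printf("HZ=%4d: slice_sync=%3d slice_async=%3d slice_idle=%d group_idle=%d (jiffies)\n",
               hz, hz / 10, hz / 25, hz / 125, hz / 125);
    }

    int main(void)
    {
        show_defaults(1000);   /* slice_idle = 8 jiffies */
        show_defaults(250);    /* slice_idle = 2 jiffies */
        show_defaults(100);    /* slice_idle = 0, raised to 1 by cfq_init() */
        return 0;
    }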
4049 
4050 /*
4051  * sysfs parts below -->
4052  */
4053 static ssize_t
4054 cfq_var_show(unsigned int var, char *page)
4055 {
4056  return sprintf(page, "%d\n", var);
4057 }
4058 
4059 static ssize_t
4060 cfq_var_store(unsigned int *var, const char *page, size_t count)
4061 {
4062  char *p = (char *) page;
4063 
4064  *var = simple_strtoul(p, &p, 10);
4065  return count;
4066 }
4067 
4068 #define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
4069 static ssize_t __FUNC(struct elevator_queue *e, char *page) \
4070 { \
4071  struct cfq_data *cfqd = e->elevator_data; \
4072  unsigned int __data = __VAR; \
4073  if (__CONV) \
4074  __data = jiffies_to_msecs(__data); \
4075  return cfq_var_show(__data, (page)); \
4076 }
4077 SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
4078 SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
4079 SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
4080 SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
4081 SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
4082 SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
4083 SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
4084 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
4085 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
4086 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
4087 SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
4088 SHOW_FUNCTION(cfq_target_latency_show, cfqd->cfq_target_latency, 1);
4089 #undef SHOW_FUNCTION
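
(For reference, not part of the source: expanding the macro for one jiffies-based attribute makes the unit conversion explicit. SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1) preprocesses to the function below, so the corresponding sysfs file reports the value in milliseconds rather than jiffies.)

    static ssize_t cfq_slice_idle_show(struct elevator_queue *e, char *page)
    {
        struct cfq_data *cfqd = e->elevator_data;
        unsigned int __data = cfqd->cfq_slice_idle;
        if (1)
            __data = jiffies_to_msecs(__data);
        return cfq_var_show(__data, (page));
    }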
4090 
4091 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
4092 static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
4093 { \
4094  struct cfq_data *cfqd = e->elevator_data; \
4095  unsigned int __data; \
4096  int ret = cfq_var_store(&__data, (page), count); \
4097  if (__data < (MIN)) \
4098  __data = (MIN); \
4099  else if (__data > (MAX)) \
4100  __data = (MAX); \
4101  if (__CONV) \
4102  *(__PTR) = msecs_to_jiffies(__data); \
4103  else \
4104  *(__PTR) = __data; \
4105  return ret; \
4106 }
4107 STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
4108 STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
4109  UINT_MAX, 1);
4110 STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
4111  UINT_MAX, 1);
4112 STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
4113 STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
4114  UINT_MAX, 0);
4115 STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
4116 STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
4117 STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
4118 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
4119 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
4120  UINT_MAX, 0);
4121 STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
4122 STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1, UINT_MAX, 1);
4123 #undef STORE_FUNCTION
4124 
4125 #define CFQ_ATTR(name) \
4126  __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
4127 
4128 static struct elv_fs_entry cfq_attrs[] = {
4129  CFQ_ATTR(quantum),
4130  CFQ_ATTR(fifo_expire_sync),
4131  CFQ_ATTR(fifo_expire_async),
4132  CFQ_ATTR(back_seek_max),
4133  CFQ_ATTR(back_seek_penalty),
4134  CFQ_ATTR(slice_sync),
4135  CFQ_ATTR(slice_async),
4136  CFQ_ATTR(slice_async_rq),
4137  CFQ_ATTR(slice_idle),
4138  CFQ_ATTR(group_idle),
4139  CFQ_ATTR(low_latency),
4140  CFQ_ATTR(target_latency),
4141  __ATTR_NULL
4142 };
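
(Usage note, not part of the source: each CFQ_ATTR entry becomes a file under /sys/block/<disk>/queue/iosched/ once CFQ is the active elevator for that disk, with the S_IRUGO|S_IWUSR permissions declared above. A small userspace sketch, assuming a disk named "sda", that reads and then clears slice_idle; writing requires root.)

    #include <stdio.h>

    /* Illustrative only: query and tune one CFQ attribute through sysfs.
     * The disk name "sda" is an assumption; adjust to a real device. */
    int main(void)
    {
        const char *path = "/sys/block/sda/queue/iosched/slice_idle";
        char buf[32];
        FILE *f;

        f = fopen(path, "r");
        if (!f) {
            perror(path);
            return 1;
        }
        if (fgets(buf, sizeof(buf), f))
            printf("slice_idle = %s", buf);  /* milliseconds, via jiffies_to_msecs() */
        fclose(f);

        f = fopen(path, "w");
        if (!f) {
            perror(path);
            return 1;
        }
        fputs("0\n", f);  /* 0 disables idling, often preferred on SSDs */
        fclose(f);
        return 0;
    }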
4143 
4144 static struct elevator_type iosched_cfq = {
4145  .ops = {
4146  .elevator_merge_fn = cfq_merge,
4147  .elevator_merged_fn = cfq_merged_request,
4148  .elevator_merge_req_fn = cfq_merged_requests,
4149  .elevator_allow_merge_fn = cfq_allow_merge,
4150  .elevator_bio_merged_fn = cfq_bio_merged,
4151  .elevator_dispatch_fn = cfq_dispatch_requests,
4152  .elevator_add_req_fn = cfq_insert_request,
4153  .elevator_activate_req_fn = cfq_activate_request,
4154  .elevator_deactivate_req_fn = cfq_deactivate_request,
4155  .elevator_completed_req_fn = cfq_completed_request,
4156  .elevator_former_req_fn = elv_rb_former_request,
4157  .elevator_latter_req_fn = elv_rb_latter_request,
4158  .elevator_init_icq_fn = cfq_init_icq,
4159  .elevator_exit_icq_fn = cfq_exit_icq,
4160  .elevator_set_req_fn = cfq_set_request,
4161  .elevator_put_req_fn = cfq_put_request,
4162  .elevator_may_queue_fn = cfq_may_queue,
4163  .elevator_init_fn = cfq_init_queue,
4164  .elevator_exit_fn = cfq_exit_queue,
4165  },
4166  .icq_size = sizeof(struct cfq_io_cq),
4167  .icq_align = __alignof__(struct cfq_io_cq),
4168  .elevator_attrs = cfq_attrs,
4169  .elevator_name = "cfq",
4170  .elevator_owner = THIS_MODULE,
4171 };
4172 
4173 #ifdef CONFIG_CFQ_GROUP_IOSCHED
4174 static struct blkcg_policy blkcg_policy_cfq = {
4175  .pd_size = sizeof(struct cfq_group),
4176  .cftypes = cfq_blkcg_files,
4177 
4178  .pd_init_fn = cfq_pd_init,
4179  .pd_reset_stats_fn = cfq_pd_reset_stats,
4180 };
4181 #endif
4182 
4183 static int __init cfq_init(void)
4184 {
4185  int ret;
4186 
4187  /*
4188  * could be 0 on HZ < 1000 setups
4189  */
4190  if (!cfq_slice_async)
4191  cfq_slice_async = 1;
4192  if (!cfq_slice_idle)
4193  cfq_slice_idle = 1;
4194 
4195 #ifdef CONFIG_CFQ_GROUP_IOSCHED
4196  if (!cfq_group_idle)
4197  cfq_group_idle = 1;
4198 
4199  ret = blkcg_policy_register(&blkcg_policy_cfq);
4200  if (ret)
4201  return ret;
4202 #else
4203  cfq_group_idle = 0;
4204 #endif
4205 
4206  ret = -ENOMEM;
4207  cfq_pool = KMEM_CACHE(cfq_queue, 0);
4208  if (!cfq_pool)
4209  goto err_pol_unreg;
4210 
4211  ret = elv_register(&iosched_cfq);
4212  if (ret)
4213  goto err_free_pool;
4214 
4215  return 0;
4216 
4217 err_free_pool:
4218  kmem_cache_destroy(cfq_pool);
4219 err_pol_unreg:
4220 #ifdef CONFIG_CFQ_GROUP_IOSCHED
4221  blkcg_policy_unregister(&blkcg_policy_cfq);
4222 #endif
4223  return ret;
4224 }
4225 
4226 static void __exit cfq_exit(void)
4227 {
4228 #ifdef CONFIG_CFQ_GROUP_IOSCHED
4229  blkcg_policy_unregister(&blkcg_policy_cfq);
4230 #endif
4231  elv_unregister(&iosched_cfq);
4232  kmem_cache_destroy(cfq_pool);
4233 }
4234 
4235 module_init(cfq_init);
4236 module_exit(cfq_exit);
4237 
4238 MODULE_AUTHOR("Jens Axboe");
4239 MODULE_LICENSE("GPL");
4240 MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");