#include <linux/module.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/blktrace_api.h>
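
/*
 * tunables
 */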
static const int cfq_quantum = 8;
static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
/* maximum backwards seek, in KiB */
static const int cfq_back_max = 16 * 1024;
/* penalty of a backwards seek */
static const int cfq_back_penalty = 2;
static const int cfq_slice_sync = HZ / 10;
static int cfq_slice_async = HZ / 25;
static const int cfq_slice_async_rq = 2;
static int cfq_slice_idle = HZ / 125;
static int cfq_group_idle = HZ / 125;
static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
static const int cfq_hist_divisor = 4;
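
/*
 * offset from end of service tree
 */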
#define CFQ_IDLE_DELAY		(HZ / 5)

/* below this threshold, we consider thinktime immediate */
#define CFQ_MIN_TT		(2)

#define CFQ_SLICE_SCALE		(5)
#define CFQ_HW_QUEUE_MIN	(5)
#define CFQ_SERVICE_SHIFT	12

#define CFQQ_SEEK_THR		(sector_t)(8 * 100)
#define CFQQ_CLOSE_THR		(sector_t)(8 * 1024)
#define CFQQ_SECT_THR_NONROT	(sector_t)(2 * 32)
#define CFQQ_SEEKY(cfqq)	(hweight32(cfqq->seek_history) > 32/8)
#define RQ_CIC(rq)		icq_to_cic((rq)->elv.icq)
#define RQ_CFQQ(rq)		(struct cfq_queue *) ((rq)->elv.priv[0])
#define RQ_CFQG(rq)		(struct cfq_group *) ((rq)->elv.priv[1])

#define CFQ_PRIO_LISTS		IOPRIO_BE_NR
#define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)

#define sample_valid(samples)	((samples) > 80)
#define rb_entry_cfqg(node)	rb_entry((node), struct cfq_group, rb_node)
#define CFQ_RB_ROOT	(struct cfq_rb_root) { .rb = RB_ROOT, \
			.ttime = {.last_end_request = jiffies,},}
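
/* per-cfq_group statistics, reported through the blkio cgroup interface */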
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	/* total bytes transferred */
	struct blkg_rwstat		service_bytes;
	/* total IOs serviced, post merge */
	struct blkg_rwstat		serviced;
	/* number of ios merged */
	struct blkg_rwstat		merged;
	/* total time spent on device in ns, may not be accurate w/ queueing */
	struct blkg_rwstat		service_time;
	/* total time spent waiting in scheduler queue in ns */
	struct blkg_rwstat		wait_time;
	/* number of IOs queued up */
	struct blkg_rwstat		queued;
	/* total sectors transferred */
	struct blkg_stat		sectors;
	/* total disk time charged to this group */
	struct blkg_stat		time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* time not charged to this cgroup */
	struct blkg_stat		unaccounted_time;
	/* sum of number of ios queued across all samples */
	struct blkg_stat		avg_queue_size_sum;
	/* count of samples taken for average */
	struct blkg_stat		avg_queue_size_samples;
	/* how many times this group has been removed from service tree */
	struct blkg_stat		dequeue;
	/* total time spent waiting for a timeslice */
	struct blkg_stat		group_wait_time;
	/* time spent idling for this cfq_group */
	struct blkg_stat		idle_time;
	/* total time with empty current active queue but other requests queued */
	struct blkg_stat		empty_time;
#endif	/* CONFIG_DEBUG_BLK_CGROUP */
#endif	/* CONFIG_CFQ_GROUP_IOSCHED */
#define CFQ_CFQQ_FNS(name)						\
static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)	\
{									\
	(cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)	\
{									\
	(cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)	\
{									\
	return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;	\
}
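
/*
 * Instantiated elsewhere in this file as CFQ_CFQQ_FNS(on_rr),
 * CFQ_CFQQ_FNS(wait_request), CFQ_CFQQ_FNS(sync), etc., giving
 * cfq_mark_cfqq_*(), cfq_clear_cfqq_*() and cfq_cfqq_*() accessors
 * for each per-queue state flag.
 */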
static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg)
{
	return pd_to_blkg(&cfqg->pd);
}
#if defined(CONFIG_CFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)

/* cfqg stats flags */
enum cfqg_stats_flags {
	CFQG_stats_waiting = 0,
	CFQG_stats_idling,
	CFQG_stats_empty,
};

#define CFQG_FLAG_FNS(name)						\
static inline void cfqg_stats_mark_##name(struct cfqg_stats *stats)	\
{									\
	stats->flags |= (1 << CFQG_stats_##name);			\
}									\
static inline void cfqg_stats_clear_##name(struct cfqg_stats *stats)	\
{									\
	stats->flags &= ~(1 << CFQG_stats_##name);			\
}									\
static inline int cfqg_stats_##name(struct cfqg_stats *stats)		\
{									\
	return (stats->flags & (1 << CFQG_stats_##name)) != 0;		\
}

CFQG_FLAG_FNS(waiting)
CFQG_FLAG_FNS(idling)
CFQG_FLAG_FNS(empty)
#undef CFQG_FLAG_FNS
/* This should be called with the queue_lock held. */
static void cfqg_stats_update_group_wait_time(struct cfqg_stats *stats)
{
	unsigned long long now;

	if (!cfqg_stats_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		blkg_stat_add(&stats->group_wait_time,
			      now - stats->start_group_wait_time);
	cfqg_stats_clear_waiting(stats);
}

/* This should be called with the queue_lock held. */
static void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg,
						 struct cfq_group *curr_cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	if (cfqg_stats_waiting(stats))
		return;
	if (cfqg == curr_cfqg)
		return;
	stats->start_group_wait_time = sched_clock();
	cfqg_stats_mark_waiting(stats);
}

/* This should be called with the queue_lock held. */
static void cfqg_stats_end_empty_time(struct cfqg_stats *stats)
{
	unsigned long long now;

	if (!cfqg_stats_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		blkg_stat_add(&stats->empty_time,
			      now - stats->start_empty_time);
	cfqg_stats_clear_empty(stats);
}

static void cfqg_stats_update_dequeue(struct cfq_group *cfqg)
{
	blkg_stat_add(&cfqg->stats.dequeue, 1);
}

static void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	if (blkg_rwstat_sum(&stats->queued))
		return;
	if (cfqg_stats_empty(stats))
		return;

	stats->start_empty_time = sched_clock();
	cfqg_stats_mark_empty(stats);
}

static void cfqg_stats_update_idle_time(struct cfq_group *cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	if (cfqg_stats_idling(stats)) {
		unsigned long long now = sched_clock();

		if (time_after64(now, stats->start_idle_time))
			blkg_stat_add(&stats->idle_time,
				      now - stats->start_idle_time);
		cfqg_stats_clear_idling(stats);
	}
}

static void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	BUG_ON(cfqg_stats_idling(stats));

	stats->start_idle_time = sched_clock();
	cfqg_stats_mark_idling(stats);
}

static void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	blkg_stat_add(&stats->avg_queue_size_sum,
		      blkg_rwstat_sum(&stats->queued));
	blkg_stat_add(&stats->avg_queue_size_samples, 1);
	cfqg_stats_update_group_wait_time(stats);
}
#else	/* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */

static inline void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg,
					struct cfq_group *curr_cfqg) { }
static inline void cfqg_stats_end_empty_time(struct cfqg_stats *stats) { }
static inline void cfqg_stats_update_dequeue(struct cfq_group *cfqg) { }
static inline void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg) { }
static inline void cfqg_stats_update_idle_time(struct cfq_group *cfqg) { }
static inline void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg) { }
static inline void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { }

#endif	/* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
#ifdef CONFIG_CFQ_GROUP_IOSCHED

static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
{
	return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
}

static inline void cfqg_get(struct cfq_group *cfqg)
{
	return blkg_get(cfqg_to_blkg(cfqg));
}

static inline void cfqg_put(struct cfq_group *cfqg)
{
	return blkg_put(cfqg_to_blkg(cfqg));
}

#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	do {			\
	char __pbuf[128];						\
									\
	blkg_path(cfqg_to_blkg((cfqq)->cfqg), __pbuf, sizeof(__pbuf));	\
	blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
			  cfq_cfqq_sync((cfqq)) ? 'S' : 'A',		\
			  __pbuf, ##args);				\
} while (0)

#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)	do {			\
	char __pbuf[128];						\
									\
	blkg_path(cfqg_to_blkg(cfqg), __pbuf, sizeof(__pbuf));		\
	blk_add_trace_msg((cfqd)->queue, "%s " fmt, __pbuf, ##args);	\
} while (0)
static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
					    struct cfq_group *curr_cfqg, int rw)
{
	blkg_rwstat_add(&cfqg->stats.queued, rw, 1);
	cfqg_stats_end_empty_time(&cfqg->stats);
	cfqg_stats_set_start_group_wait_time(cfqg, curr_cfqg);
}

static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
			unsigned long time, unsigned long unaccounted_time)
{
	blkg_stat_add(&cfqg->stats.time, time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg_stat_add(&cfqg->stats.unaccounted_time, unaccounted_time);
#endif
}

static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw)
{
	blkg_rwstat_add(&cfqg->stats.queued, rw, -1);
}

static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw)
{
	blkg_rwstat_add(&cfqg->stats.merged, rw, 1);
}

static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
					      uint64_t bytes, int rw)
{
	blkg_stat_add(&cfqg->stats.sectors, bytes >> 9);
	blkg_rwstat_add(&cfqg->stats.serviced, rw, 1);
	blkg_rwstat_add(&cfqg->stats.service_bytes, rw, bytes);
}

static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
			uint64_t start_time, uint64_t io_start_time, int rw)
{
	struct cfqg_stats *stats = &cfqg->stats;
	unsigned long long now = sched_clock();

	if (time_after64(now, io_start_time))
		blkg_rwstat_add(&stats->service_time, rw, now - io_start_time);
	if (time_after64(io_start_time, start_time))
		blkg_rwstat_add(&stats->wait_time, rw,
				io_start_time - start_time);
}

static void cfq_pd_reset_stats(struct blkcg_gq *blkg)
{
	struct cfq_group *cfqg = blkg_to_cfqg(blkg);
	struct cfqg_stats *stats = &cfqg->stats;

	/* queued stats shouldn't be cleared */
	blkg_rwstat_reset(&stats->service_bytes);
	blkg_rwstat_reset(&stats->serviced);
	blkg_rwstat_reset(&stats->merged);
	blkg_rwstat_reset(&stats->service_time);
	blkg_rwstat_reset(&stats->wait_time);
	blkg_stat_reset(&stats->time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg_stat_reset(&stats->unaccounted_time);
	blkg_stat_reset(&stats->avg_queue_size_sum);
	blkg_stat_reset(&stats->avg_queue_size_samples);
	blkg_stat_reset(&stats->dequeue);
	blkg_stat_reset(&stats->group_wait_time);
	blkg_stat_reset(&stats->idle_time);
	blkg_stat_reset(&stats->empty_time);
#endif
}
#else	/* CONFIG_CFQ_GROUP_IOSCHED */

static inline void cfqg_get(struct cfq_group *cfqg) { }
static inline void cfqg_put(struct cfq_group *cfqg) { }

#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
	blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)		do {} while (0)

static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
			struct cfq_group *curr_cfqg, int rw) { }
static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
			unsigned long time, unsigned long unaccounted_time) { }
static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw) { }
static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw) { }
static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
			uint64_t bytes, int rw) { }
static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
			uint64_t start_time, uint64_t io_start_time, int rw) { }

#endif	/* CONFIG_CFQ_GROUP_IOSCHED */
#define cfq_log(cfqd, fmt, args...)	\
	blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)

/* Traverses through cfq group service trees */
#define for_each_cfqg_st(cfqg, i, j, st) \
	for (i = 0; i <= IDLE_WORKLOAD; i++) \
		for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
			: &cfqg->service_tree_idle; \
			(i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
			(i == IDLE_WORKLOAD && j == 0); \
			j++, st = i < IDLE_WORKLOAD ? \
			&cfqg->service_trees[i][j]: NULL) \
static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd,
					struct cfq_ttime *ttime,
					bool group_idle)

static inline bool iops_mode(struct cfq_data *cfqd)

static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
{
	if (!cfq_cfqq_sync(cfqq))
		return ASYNC_WORKLOAD;
	if (!cfq_cfqq_idle_window(cfqq))
		return SYNC_NOIDLE_WORKLOAD;
	return SYNC_WORKLOAD;
}
static inline int cfq_group_busy_queues_wl(enum wl_prio_t wl,
					   struct cfq_data *cfqd,
					   struct cfq_group *cfqg)

static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
					 struct cfq_group *cfqg)

static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_cq *cic, bool is_sync)
{
	return cic->cfqq[is_sync];
}

static inline void cic_set_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq,
				bool is_sync)
{
	cic->cfqq[is_sync] = cfqq;
}

static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic)
{
	return cic->icq.q->elevator->elevator_data;
}

static inline bool cfq_bio_sync(struct bio *bio)
{
	return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
}

static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
{
	if (cfqd->busy_queues) {
		cfq_log(cfqd, "schedule dispatch");
		kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
	}
}

static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
				 unsigned short prio)

static inline int
cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
}
static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
{
	s64 delta = (s64)(vdisktime - min_vdisktime);

	if (delta > 0)
		min_vdisktime = vdisktime;

	return min_vdisktime;
}

static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
{
	s64 delta = (s64)(vdisktime - min_vdisktime);

	if (delta < 0)
		min_vdisktime = vdisktime;

	return min_vdisktime;
}
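
/*
 * vdisktime is the virtual disk time a group has received; groups are kept
 * sorted by it on the group service tree so the least-served group runs next.
 */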
static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
					struct cfq_group *cfqg, bool rt)
{
	unsigned min_q, max_q;
	unsigned mult = cfq_hist_divisor - 1;
	unsigned round = cfq_hist_divisor / 2;
	unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);
static inline unsigned
cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)

static inline unsigned
cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	unsigned slice = cfq_prio_to_slice(cfqd, cfqq);

	if (cfqd->cfq_latency) {
		unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
						cfq_class_rt(cfqq));
		unsigned sync_slice = cfqd->cfq_slice[1];
		unsigned expect_latency = sync_slice * iq;
		unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg);

		if (expect_latency > group_slice) {
			unsigned low_slice =
				min(slice, base_low_slice * slice / sync_slice);
			slice = max(slice * group_slice / expect_latency,
				    low_slice);
		}
	}
	return slice;
}

static inline void
cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq);

static inline bool cfq_slice_used(struct cfq_queue *cfqq)
{
	if (cfq_cfqq_slice_new(cfqq))
		return false;
	if (time_before(jiffies, cfqq->slice_end))
		return false;

	return true;
}
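
/*
 * cfq_choose_req() selects, of two candidate requests, the one that lies
 * "closer" to the last dispatched sector, penalizing backward seeks.
 */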
	unsigned long back_max;
#define CFQ_RQ1_WRAP	0x01	/* request 1 wraps */
#define CFQ_RQ2_WRAP	0x02	/* request 2 wraps */

	if (rq1 == NULL || rq1 == rq2)

	if (rq_is_sync(rq1) != rq_is_sync(rq2))
		return rq_is_sync(rq1) ? rq1 : rq2;

	if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_PRIO)
		return rq1->cmd_flags & REQ_PRIO ? rq1 : rq2;

	s1 = blk_rq_pos(rq1);
	s2 = blk_rq_pos(rq2);

	else if (s1 + back_max >= last)
		wrap |= CFQ_RQ1_WRAP;
	else if (s2 + back_max >= last)
		wrap |= CFQ_RQ2_WRAP;

	case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP):

static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
{
	if (root->left == n)
		root->left = NULL;
	rb_erase_init(n, &root->rb);
	--root->count;
}
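
/*
 * cfq_find_next_rq() picks the request to serve after @last from the queue's
 * sorted rbtree, again deciding via cfq_choose_req().
 */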
	prev = rb_entry_rq(rbprev);

	next = rb_entry_rq(rbnext);

	if (rbnext && rbnext != &last->rb_node)
		next = rb_entry_rq(rbnext);

	return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
				      struct cfq_queue *cfqq)
{
	/*
	 * just an approximation, should be ok.
	 */
	return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
		       cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
}
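
/*
 * __cfq_group_service_tree_add(): rbtree insertion of a cfq_group into the
 * group service tree, ordered by its vdisktime-based key.
 */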
	s64 key = cfqg_key(st, cfqg);

	while (*node != NULL) {
		if (key < cfqg_key(st, __cfqg))

	rb_link_node(&cfqg->rb_node, parent, node);
static void
cfq_update_group_weight(struct cfq_group *cfqg)

	cfq_update_group_weight(cfqg);
	__cfq_group_service_tree_add(st, cfqg);

	cfq_group_service_tree_add(st, cfqg);

	cfq_rb_erase(&cfqg->rb_node, st);

	cfq_group_service_tree_del(st, cfqg);
	cfqg_stats_update_dequeue(cfqg);
static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
						unsigned int *unaccounted_time)
{
	unsigned int slice_used;
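
/*
 * cfq_group_served(): charge the just-served queue's slice usage to its
 * group's vdisktime and reposition the group on the service tree.
 */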
	unsigned int used_sl, charge, unaccounted_sl = 0;
	int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
			- cfqg->service_tree_idle.count;

	used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);

	if (iops_mode(cfqd))
	else if (!cfq_cfqq_sync(cfqq) && !nr_sync)

	cfq_group_service_tree_del(st, cfqg);
	cfqg->vdisktime += cfq_scale_slice(charge, cfqg);
	cfq_group_service_tree_add(st, cfqg);

		     "sl_used=%u disp=%u charge=%u iops=%u sect=%lu",

	cfqg_stats_update_timeslice_used(cfqg, used_sl, unaccounted_sl);
	cfqg_stats_set_start_empty_time(cfqg);

static void cfq_init_cfqg_base(struct cfq_group *cfqg)

#ifdef CONFIG_CFQ_GROUP_IOSCHED
static void cfq_pd_init(struct blkcg_gq *blkg)
{
	struct cfq_group *cfqg = blkg_to_cfqg(blkg);

	cfq_init_cfqg_base(cfqg);
	cfqg->weight = blkg->blkcg->cfq_weight;
}

static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
						struct blkcg *blkcg)

	cfqg = blkg_to_cfqg(blkg);

	if (!cfq_cfqq_sync(cfqq))
		cfqg = cfqq->cfqd->root_group;
	/* cfqg_prfill_weight_device() */
	struct cfq_group *cfqg = pd_to_cfqg(pd);

static int cfqg_print_weight_device(struct cgroup *cgrp, struct cftype *cft,
				    struct seq_file *sf)
{
	blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp),
			  cfqg_prfill_weight_device, &blkcg_policy_cfq, 0,
			  false);
	return 0;
}

static int cfq_print_weight(struct cgroup *cgrp, struct cftype *cft,
			    struct seq_file *sf)
{
	seq_printf(sf, "%u\n", cgroup_to_blkcg(cgrp)->cfq_weight);
	return 0;
}

static int cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
				  const char *buf)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
	struct blkg_conf_ctx ctx;

	cfqg = blkg_to_cfqg(ctx.blkg);

static int cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);

	spin_lock_irq(&blkcg->lock);
	blkcg->cfq_weight = (unsigned int)val;

		struct cfq_group *cfqg = blkg_to_cfqg(blkg);

	spin_unlock_irq(&blkcg->lock);

static int cfqg_print_stat(struct cgroup *cgrp, struct cftype *cft,
			   struct seq_file *sf)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);

	blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat, &blkcg_policy_cfq,
			  cft->private, false);
	return 0;
}

static int cfqg_print_rwstat(struct cgroup *cgrp, struct cftype *cft,
			     struct seq_file *sf)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);

	blkcg_print_blkgs(sf, blkcg, blkg_prfill_rwstat, &blkcg_policy_cfq,
			  cft->private, true);
	return 0;
}
#ifdef CONFIG_DEBUG_BLK_CGROUP
static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
				      struct blkg_policy_data *pd, int off)
{
	struct cfq_group *cfqg = pd_to_cfqg(pd);
	u64 samples = blkg_stat_read(&cfqg->stats.avg_queue_size_samples);
	u64 v = 0;

	if (samples) {
		v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum);
		do_div(v, samples);
	}
	__blkg_prfill_u64(sf, pd, v);
	return 0;
}

/* print avg_queue_size */
static int cfqg_print_avg_queue_size(struct cgroup *cgrp, struct cftype *cft,
				     struct seq_file *sf)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);

	blkcg_print_blkgs(sf, blkcg, cfqg_prfill_avg_queue_size,
			  &blkcg_policy_cfq, 0, false);
	return 0;
}
#endif	/* CONFIG_DEBUG_BLK_CGROUP */
static struct cftype cfq_blkcg_files[] = {
	{
		.name = "weight_device",
		.read_seq_string = cfqg_print_weight_device,
		.write_string = cfqg_set_weight_device,
		.max_write_len = 256,
	},
	{
		.name = "weight",
		.read_seq_string = cfq_print_weight,
		.write_u64 = cfq_set_weight,
	},
	{
		.name = "time",
		.read_seq_string = cfqg_print_stat,
	},
	{
		.name = "sectors",
		.read_seq_string = cfqg_print_stat,
	},
	{
		.name = "io_service_bytes",
		.read_seq_string = cfqg_print_rwstat,
	},
	{
		.name = "io_serviced",
		.read_seq_string = cfqg_print_rwstat,
	},
	{
		.name = "io_service_time",
		.read_seq_string = cfqg_print_rwstat,
	},
	{
		.name = "io_wait_time",
		.read_seq_string = cfqg_print_rwstat,
	},
	{
		.name = "io_merged",
		.read_seq_string = cfqg_print_rwstat,
	},
	{
		.name = "io_queued",
		.read_seq_string = cfqg_print_rwstat,
	},
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "avg_queue_size",
		.read_seq_string = cfqg_print_avg_queue_size,
	},
	{
		.name = "group_wait_time",
		.read_seq_string = cfqg_print_stat,
	},
	{
		.name = "idle_time",
		.read_seq_string = cfqg_print_stat,
	},
	{
		.name = "empty_time",
		.read_seq_string = cfqg_print_stat,
	},
	{
		.name = "dequeue",
		.read_seq_string = cfqg_print_stat,
	},
	{
		.name = "unaccounted_time",
		.read_seq_string = cfqg_print_stat,
	},
#endif	/* CONFIG_DEBUG_BLK_CGROUP */
	{ }	/* terminate */
};
#else	/* CONFIG_CFQ_GROUP_IOSCHED */

static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
						struct blkcg *blkcg)
{
	return cfqd->root_group;
}

#endif	/* CONFIG_CFQ_GROUP_IOSCHED */
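
/*
 * cfq_service_tree_add(): insert a cfq_queue into its service tree, keyed by
 * rb_key (a jiffies-based offset), optionally at the front.
 */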
	service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
					cfqq_type(cfqq));
	if (parent && parent != &cfqq->rb_node) {
		rb_key += __cfqq->rb_key;
	} else if (!add_front) {
		rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;

		__cfqq = cfq_rb_first(service_tree);

	if (rb_key == cfqq->rb_key &&

	p = &service_tree->rb.rb_node;

	rb_link_node(&cfqq->rb_node, parent, p);

	service_tree->count++;
	if (add_front || !new_cfqq)

	cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
	if (sector > blk_rq_pos(cfqq->next_rq))
	else if (sector < blk_rq_pos(cfqq->next_rq))

	*ret_parent = parent;

	__cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
				      blk_rq_pos(cfqq->next_rq), &parent, &p);

	rb_link_node(&cfqq->p_node, parent, p);

	if (cfq_cfqq_on_rr(cfqq)) {
		cfq_service_tree_add(cfqd, cfqq, 0);
		cfq_prio_tree_add(cfqd, cfqq);
	}

	BUG_ON(cfq_cfqq_on_rr(cfqq));
	cfq_mark_cfqq_on_rr(cfqq);
	if (cfq_cfqq_sync(cfqq))

	cfq_resort_rr_list(cfqd, cfqq);

	BUG_ON(!cfq_cfqq_on_rr(cfqq));
	cfq_clear_cfqq_on_rr(cfqq);

	cfq_group_notify_queue_del(cfqd, cfqq->cfqg);
	if (cfq_cfqq_sync(cfqq))

static void cfq_del_rq_rb(struct request *rq)
{
	const int sync = rq_is_sync(rq);

static void cfq_add_rq_rb(struct request *rq)
{
	cfqq->queued[rq_is_sync(rq)]++;

	if (!cfq_cfqq_on_rr(cfqq))
		cfq_add_cfqq_rr(cfqd, cfqq);

	cfq_prio_tree_add(cfqd, cfqq);
static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
{
	cfqq->queued[rq_is_sync(rq)]--;
	cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);

	cfqg_stats_update_io_add(RQ_CFQG(rq), cfqq->cfqd->serving_group,
				 rq->cmd_flags);
static struct request *
cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
{
	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
	if (cfqq) {
		sector_t sector = bio->bi_sector + bio_sectors(bio);

	/* cfq_activate_request()/cfq_deactivate_request() */
	struct cfq_data *cfqd = q->elevator->elevator_data;

static void cfq_remove_request(struct request *rq)
{
	cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);

	list_del_init(&rq->queuelist);

	cfqq->cfqd->rq_queued--;
	cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);

	/* cfq_merge() */
	struct cfq_data *cfqd = q->elevator->elevator_data;

	__rq = cfq_find_rq_fmerge(cfqd, bio);
		return ELEVATOR_FRONT_MERGE;

	return ELEVATOR_NO_MERGE;

	/* cfq_merged_request() */
	if (type == ELEVATOR_FRONT_MERGE) {
		cfq_reposition_rq_rb(cfqq, req);

	/* cfq_bio_merged() */
	cfqg_stats_update_io_merged(RQ_CFQG(req), bio->bi_rw);

	/* cfq_merged_requests() */
	struct cfq_data *cfqd = q->elevator->elevator_data;

	if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
	    time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
		list_move(&rq->queuelist, &next->queuelist);
		rq_set_fifo_time(rq, rq_fifo_time(next));
	}

	cfq_remove_request(next);
	cfqg_stats_update_io_merged(RQ_CFQG(rq), next->cmd_flags);

	cfq_del_cfqq_rr(cfqd, cfqq);

	/* cfq_allow_merge() */
	struct cfq_data *cfqd = q->elevator->elevator_data;

	if (cfq_bio_sync(bio) && !rq_is_sync(rq))

	cic = cfq_cic_lookup(cfqd, current->io_context);

	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));

	/* cfq_del_timer() */
	cfqg_stats_update_idle_time(cfqq->cfqg);
static void __cfq_set_active_queue(struct cfq_data *cfqd,
				   struct cfq_queue *cfqq)
{
	cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
		     cfqd->serving_prio, cfqd->serving_type);
	cfqg_stats_update_avg_queue_size(cfqq->cfqg);

	cfq_clear_cfqq_wait_request(cfqq);
	cfq_clear_cfqq_must_dispatch(cfqq);
	cfq_clear_cfqq_must_alloc_slice(cfqq);
	cfq_clear_cfqq_fifo_expire(cfqq);
	cfq_mark_cfqq_slice_new(cfqq);

	cfq_del_timer(cfqd, cfqq);

	/* __cfq_slice_expired() */
	cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);

	if (cfq_cfqq_wait_request(cfqq))
		cfq_del_timer(cfqd, cfqq);

	cfq_clear_cfqq_wait_request(cfqq);
	cfq_clear_cfqq_wait_busy(cfqq);

	cfq_mark_cfqq_split_coop(cfqq);

	if (cfq_cfqq_slice_new(cfqq))
		cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);

	cfq_group_served(cfqd, cfqq->cfqg, cfqq);

	cfq_del_cfqq_rr(cfqd, cfqq);

	cfq_resort_rr_list(cfqd, cfqq);

static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
{
	struct cfq_queue *cfqq = cfqd->active_queue;

	if (cfqq)
		__cfq_slice_expired(cfqd, cfqq, timed_out);
}

	/* cfq_get_next_queue() */
	return cfq_rb_first(service_tree);

	/* cfq_get_next_queue_forced() */
	cfqg = cfq_get_next_cfqg(cfqd);
	if ((cfqq = cfq_rb_first(st)) != NULL)

	/* cfq_set_active_queue() */
	cfqq = cfq_get_next_queue(cfqd);
	__cfq_set_active_queue(cfqd, cfqq);
	/* cfqq_close() */
	__cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
	if (blk_rq_pos(__cfqq->next_rq) < sector)
	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))

	/* cfq_close_cooperator() */
	if (!cfq_cfqq_sync(cur_cfqq))
	if (cur_cfqq->cfqg->nr_cfqq == 1)

	cfqq = cfqq_close(cfqd, cur_cfqq);

	/* cfq_should_idle() */
	if (!cfq_cfqq_sync(cfqq))

	if (cfq_cfqq_idle_window(cfqq) &&

	if (service_tree->count == 1 && cfq_cfqq_sync(cfqq) &&
	    !cfq_io_thinktime_big(cfqd, &service_tree->ttime, false))

		     service_tree->count);

static void cfq_arm_slice_timer(struct cfq_data *cfqd)
{
	unsigned long sl, group_idle = 0;

	if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)

	WARN_ON(cfq_cfqq_slice_new(cfqq));

	if (!cfq_should_idle(cfqd, cfqq)) {

		     cic->ttime.ttime_mean);

	if (group_idle && cfqq->cfqg->nr_cfqq > 1)

	cfq_mark_cfqq_wait_request(cfqq);

	cfqg_stats_set_start_idle_time(cfqq->cfqg);
	cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
		     group_idle ? 1 : 0);

	/* cfq_dispatch_insert() */
	struct cfq_data *cfqd = q->elevator->elevator_data;

	cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
	cfq_remove_request(rq);

	cfqg_stats_update_dispatch(cfqq->cfqg, blk_rq_bytes(rq), rq->cmd_flags);

	/* cfq_check_fifo() */
	if (cfq_cfqq_fifo_expire(cfqq))

	cfq_mark_cfqq_fifo_expire(cfqq);

	if (list_empty(&cfqq->fifo))

	rq = rq_entry_fifo(cfqq->fifo.next);
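
/*
 * Queue merging: when cooperating processes issue closely-spaced requests,
 * their cfq_queues can be merged. process_refs below counts references held
 * by processes, as opposed to references held by in-flight requests.
 */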
static int cfqq_process_refs(struct cfq_queue *cfqq)
{
	int process_refs, io_refs;

	io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
	process_refs = cfqq->ref - io_refs;
	BUG_ON(process_refs < 0);
	return process_refs;
}

	/* cfq_setup_merge() */
	int process_refs, new_process_refs;

	if (!cfqq_process_refs(new_cfqq))

	while ((__cfqq = new_cfqq->new_cfqq)) {

	process_refs = cfqq_process_refs(cfqq);
	new_process_refs = cfqq_process_refs(new_cfqq);

	if (process_refs == 0 || new_process_refs == 0)

	if (new_process_refs >= process_refs) {
		new_cfqq->ref += process_refs;
	} else {
		cfqq->ref += new_process_refs;
	}

	/* choose the workload type (sync / sync-noidle / async) to serve next */
	bool key_valid = false;
	unsigned long lowest_key = 0;

	queue = cfq_rb_first(service_tree_for(cfqg, prio, i));
		lowest_key = queue->rb_key;

	/* choose_service_tree() */
	unsigned group_slice;

	if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
	else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))

	group_slice = cfq_group_slice(cfqd, cfqg);

	slice = group_slice * count /
		      cfq_group_busy_queues_wl(cfqd->serving_prio, cfqd, cfqg));

		cfqg_busy_async_queues(cfqd, cfqg);

	slice = min_t(unsigned, slice, tmp);

	cfq_log(cfqd, "workload slice:%d", slice);

	/* cfq_get_next_cfqg() */
	cfqg = cfq_rb_first_group(st);
	update_min_vdisktime(st);

static void cfq_choose_cfqg(struct cfq_data *cfqd)
{
	struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);

	choose_service_tree(cfqd, cfqg);
	/* cfq_select_queue() */
	if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {

	    && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
		goto check_group_idle;

	new_cfqq = cfq_close_cooperator(cfqd, cfqq);
		cfq_setup_merge(cfqq, new_cfqq);

	if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
	    (cfq_cfqq_slice_new(cfqq) ||
		cfq_clear_cfqq_deep(cfqq);
		cfq_clear_cfqq_idle_window(cfqq);

	if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {

	    cfqq->cfqg->dispatched &&
	    !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) {

	cfq_slice_expired(cfqd, 0);

	cfq_choose_cfqg(cfqd);

	cfqq = cfq_set_active_queue(cfqd, new_cfqq);

static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
{
	cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);

	__cfq_slice_expired(cfqq->cfqd, cfqq, 0);

static int cfq_forced_dispatch(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq;
	int dispatched = 0;

	/* expire the current timeslice, then drain every queue */
	cfq_slice_expired(cfqd, 0);
	while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
		__cfq_set_active_queue(cfqd, cfqq);
		dispatched += __cfq_forced_dispatch_cfqq(cfqq);
	}

	cfq_log(cfqd, "forced_dispatch=%d", dispatched);
	return dispatched;
}
static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
				       struct cfq_queue *cfqq)
{
	if (cfq_cfqq_slice_new(cfqq))

	/* cfq_may_dispatch() */
	unsigned int max_dispatch;

	bool promote_sync = false;

		promote_sync = true;

	if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&

	if (depth < max_dispatch)
		max_dispatch = depth;

	/* cfq_dispatch_request() */
	if (!cfq_may_dispatch(cfqd, cfqq))

	rq = cfq_check_fifo(cfqq);

	cfq_dispatch_insert(cfqd->queue, rq);

	atomic_long_inc(&cic->icq.ioc->refcount);

	/* cfq_dispatch_requests() */
	struct cfq_data *cfqd = q->elevator->elevator_data;

		return cfq_forced_dispatch(cfqd);

	cfqq = cfq_select_queue(cfqd);

	if (!cfq_dispatch_request(cfqd, cfqq))

	cfq_clear_cfqq_must_dispatch(cfqq);

	if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
		cfq_slice_expired(cfqd, 0);
static void cfq_put_queue(struct cfq_queue *cfqq)
{
	__cfq_slice_expired(cfqd, cfqq, 0);
	cfq_schedule_dispatch(cfqd);

	BUG_ON(cfq_cfqq_on_rr(cfqq));

static void cfq_put_cooperator(struct cfq_queue *cfqq)
{
		if (__cfqq == cfqq) {
			WARN(1, "cfqq->new_cfqq loop detected\n");
			break;
		}
		cfq_put_queue(__cfqq);

	/* exiting a cfqq: expire it if active, then drop its references */
	__cfq_slice_expired(cfqd, cfqq, 0);
	cfq_schedule_dispatch(cfqd);

	cfq_put_cooperator(cfqq);

	cfq_put_queue(cfqq);
static void cfq_init_icq(struct io_cq *icq)
{
	struct cfq_io_cq *cic = icq_to_cic(icq);

static void cfq_exit_icq(struct io_cq *icq)
{
	struct cfq_io_cq *cic = icq_to_cic(icq);
	struct cfq_data *cfqd = cic_to_cfqd(cic);
	/* cfq_init_prio_data() */
	if (!cfq_cfqq_prio_changed(cfqq))

	switch (ioprio_class) {
		cfqq->ioprio = task_nice_ioprio(tsk);

		cfq_clear_cfqq_idle_window(cfqq);

	cfq_clear_cfqq_prio_changed(cfqq);

static void check_ioprio_changed(struct cfq_io_cq *cic, struct bio *bio)
{
	int ioprio = cic->icq.ioc->ioprio;
	struct cfq_data *cfqd = cic_to_cfqd(cic);

		cfq_put_queue(cfqq);

	cfq_mark_cfqq_prio_changed(cfqq);

	/* cfq_init_cfqq() */
	INIT_LIST_HEAD(&cfqq->fifo);

	cfq_mark_cfqq_prio_changed(cfqq);

		cfq_mark_cfqq_idle_window(cfqq);
		cfq_mark_cfqq_sync(cfqq);
#ifdef CONFIG_CFQ_GROUP_IOSCHED
static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
{
	struct cfq_data *cfqd = cic_to_cfqd(cic);

	id = bio_blkcg(bio)->id;

	sync_cfqq = cic_to_cfqq(cic, 1);

		cic_set_cfqq(cic, NULL, 1);
		cfq_put_queue(sync_cfqq);

#else
static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) { }
#endif	/* CONFIG_CFQ_GROUP_IOSCHED */

static struct cfq_queue *
cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
		     struct bio *bio, gfp_t gfp_mask)
{
	struct blkcg *blkcg;

	blkcg = bio_blkcg(bio);
	cfqg = cfq_lookup_create_cfqg(cfqd, blkcg);
	cfqq = cic_to_cfqq(cic, is_sync);

	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
			spin_unlock_irq(cfqd->queue->queue_lock);
			spin_lock_irq(cfqd->queue->queue_lock);

		cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
		cfq_init_prio_data(cfqq, cic);
		cfq_link_cfqq_cfqg(cfqq, cfqg);

static struct cfq_queue **
cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
{
	switch (ioprio_class) {

static struct cfq_queue *
cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
	      struct bio *bio, gfp_t gfp_mask)
{
	async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);

	cfqq = cfq_find_alloc_queue(cfqd, is_sync, cic, bio, gfp_mask);

	if (!is_sync && !(*async_cfqq)) {
static void
__cfq_update_io_thinktime(struct cfq_ttime *ttime, unsigned long slice_idle)
{
	elapsed = min(elapsed, 2UL * slice_idle);

	/* cfq_update_io_thinktime() */
	if (cfq_cfqq_sync(cfqq)) {
#ifdef CONFIG_CFQ_GROUP_IOSCHED

	/* cfq_update_io_seektime() */
	sector_t n_sec = blk_rq_sectors(rq);

	if (blk_queue_nonrot(cfqd->queue))

	/* cfq_update_idle_window() */
	int old_idle, enable_idle;

	enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);

		cfq_mark_cfqq_deep(cfqq);

	if (old_idle != enable_idle) {
			cfq_mark_cfqq_idle_window(cfqq);
		else
			cfq_clear_cfqq_idle_window(cfqq);

	/* cfq_should_preempt() */
	if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))

	if (cfq_slice_used(cfqq))

	if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))

	if (cfq_rq_close(cfqd, cfqq, rq))

	/* cfq_preempt_queue() */
	cfq_slice_expired(cfqd, 1);

	if (old_type != cfqq_type(cfqq))
		cfqq->cfqg->saved_workload_slice = 0;

	BUG_ON(!cfq_cfqq_on_rr(cfqq));

	cfq_service_tree_add(cfqd, cfqq, 1);

	cfq_mark_cfqq_slice_new(cfqq);
	/* cfq_rq_enqueued() */
	cfq_update_io_thinktime(cfqd, cfqq, cic);
	cfq_update_io_seektime(cfqd, cfqq, rq);
	cfq_update_idle_window(cfqd, cfqq, cic);

	if (cfq_cfqq_wait_request(cfqq)) {
			cfq_del_timer(cfqd, cfqq);
			cfq_clear_cfqq_wait_request(cfqq);

			cfqg_stats_update_idle_time(cfqq->cfqg);
			cfq_mark_cfqq_must_dispatch(cfqq);
	} else if (cfq_should_preempt(cfqd, cfqq, rq)) {
		cfq_preempt_queue(cfqd, cfqq);

	/* cfq_insert_request() */
	struct cfq_data *cfqd = q->elevator->elevator_data;

	cfq_init_prio_data(cfqq, RQ_CIC(rq));

	rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);

	cfq_rq_enqueued(cfqd, cfqq, rq);

static void cfq_update_hw_tag(struct cfq_data *cfqd)
{
	if (cfqq && cfq_cfqq_idle_window(cfqq) &&

	/* cfq_should_wait_busy() */
	if (cfqq->cfqg->nr_cfqq > 1)

	if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true))

	if (cfq_slice_used(cfqq))
	/* cfq_completed_request() */
	const int sync = rq_is_sync(rq);

	cfq_update_hw_tag(cfqd);

	cfqg_stats_update_completion(cfqq->cfqg, rq_start_time_ns(rq),
				     rq_io_start_time_ns(rq), rq->cmd_flags);

	RQ_CIC(rq)->ttime.last_end_request = now;

	if (cfq_cfqq_on_rr(cfqq))
		service_tree = service_tree_for(cfqq->cfqg,
						cfqq_prio(cfqq), cfqq_type(cfqq));
		service_tree->ttime.last_end_request = now;

#ifdef CONFIG_CFQ_GROUP_IOSCHED
	cfqq->cfqg->ttime.last_end_request = now;
#endif

	if (cfq_cfqq_slice_new(cfqq)) {
		cfq_set_prio_slice(cfqd, cfqq);
		cfq_clear_cfqq_slice_new(cfqq);
	}

	if (cfq_should_wait_busy(cfqd, cfqq)) {
		cfq_mark_cfqq_wait_busy(cfqq);

		cfq_slice_expired(cfqd, 1);
	else if (sync && cfqq_empty &&
		 !cfq_close_cooperator(cfqd, cfqq)) {
		cfq_arm_slice_timer(cfqd);
	}

	cfq_schedule_dispatch(cfqd);
static inline int __cfq_may_queue(struct cfq_queue *cfqq)
{
	if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
		cfq_mark_cfqq_must_alloc_slice(cfqq);
		return ELV_MQUEUE_MUST;
	}

	return ELV_MQUEUE_MAY;
}
	/* cfq_may_queue() */
	struct cfq_data *cfqd = q->elevator->elevator_data;

		return ELV_MQUEUE_MAY;

	cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
	if (cfqq) {
		cfq_init_prio_data(cfqq, cic);

		return __cfq_may_queue(cfqq);
	}

	return ELV_MQUEUE_MAY;

static void cfq_put_request(struct request *rq)
{
	const int rw = rq_data_dir(rq);

	rq->elv.priv[0] = NULL;
	rq->elv.priv[1] = NULL;

	cfq_put_queue(cfqq);

	/* cfq_merge_cfqqs() */
	cic_set_cfqq(cic, cfqq->new_cfqq, 1);
	cfq_mark_cfqq_coop(cfqq->new_cfqq);
	cfq_put_queue(cfqq);
	return cic_to_cfqq(cic, 1);

	/* split_cfqq() */
	if (cfqq_process_refs(cfqq) == 1) {
		cfq_clear_cfqq_coop(cfqq);
		cfq_clear_cfqq_split_coop(cfqq);

	cic_set_cfqq(cic, NULL, 1);

	cfq_put_cooperator(cfqq);

	cfq_put_queue(cfqq);
	/* cfq_set_request() */
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_io_cq *cic = icq_to_cic(rq->elv.icq);
	const int rw = rq_data_dir(rq);
	const bool is_sync = rq_is_sync(rq);

	spin_lock_irq(q->queue_lock);

	check_ioprio_changed(cic, bio);
	check_blkcg_changed(cic, bio);

	cfqq = cic_to_cfqq(cic, is_sync);
	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
		cfqq = cfq_get_queue(cfqd, is_sync, cic, bio, gfp_mask);
		cic_set_cfqq(cic, cfqq, is_sync);
	}

	if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
		cfqq = split_cfqq(cic, cfqq);

		cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
	}

	cfqg_get(cfqq->cfqg);
	rq->elv.priv[0] = cfqq;
	rq->elv.priv[1] = cfqq->cfqg;
	spin_unlock_irq(q->queue_lock);

	spin_lock_irq(q->queue_lock);
	spin_unlock_irq(q->queue_lock);

static void cfq_idle_slice_timer(unsigned long data)
{
	unsigned long flags;

	cfq_log(cfqd, "idle timer fired");

		if (cfq_cfqq_must_dispatch(cfqq))

		if (cfq_slice_used(cfqq))

		cfq_clear_cfqq_deep(cfqq);

	cfq_slice_expired(cfqd, timed_out);

	cfq_schedule_dispatch(cfqd);

	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)

static void cfq_put_async_queues(struct cfq_data *cfqd)

static void cfq_exit_queue(struct elevator_queue *e)
{
	struct cfq_data *cfqd = e->elevator_data;

	cfq_shutdown_timer_wq(cfqd);

	spin_lock_irq(q->queue_lock);

	cfq_put_async_queues(cfqd);

	spin_unlock_irq(q->queue_lock);

	cfq_shutdown_timer_wq(cfqd);
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	blkcg_deactivate_policy(q, &blkcg_policy_cfq);
#endif

	/* cfq_init_queue() */
	q->elevator->elevator_data = cfqd;

#ifdef CONFIG_CFQ_GROUP_IOSCHED
	cfqd->root_group = blkg_to_cfqg(q->root_blkg);
#endif

	cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);

	spin_lock_irq(q->queue_lock);
	spin_unlock_irq(q->queue_lock);
/*
 * sysfs parts below -->
 */
static ssize_t
cfq_var_show(unsigned int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static ssize_t
cfq_var_store(unsigned int *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtoul(p, &p, 10);
	return count;
}

#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct cfq_data *cfqd = e->elevator_data;			\
	unsigned int __data = __VAR;					\
	if (__CONV)							\
		__data = jiffies_to_msecs(__data);			\
	return cfq_var_show(__data, (page));				\
}
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
{									\
	struct cfq_data *cfqd = e->elevator_data;			\
	unsigned int __data;						\
	int ret = cfq_var_store(&__data, (page), count);		\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	if (__CONV)							\
		*(__PTR) = msecs_to_jiffies(__data);			\
	else								\
		*(__PTR) = __data;					\
	return ret;							\
}
#undef STORE_FUNCTION
#define CFQ_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
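
/*
 * CFQ_ATTR(name) wires a tunable to the cfq_<name>_show/_store handlers
 * generated by SHOW_FUNCTION/STORE_FUNCTION, exposing it under
 * /sys/block/<dev>/queue/iosched/.
 */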
static struct elv_fs_entry cfq_attrs[] = {
	CFQ_ATTR(quantum),
	CFQ_ATTR(fifo_expire_sync),
	CFQ_ATTR(fifo_expire_async),
	CFQ_ATTR(back_seek_max),
	CFQ_ATTR(back_seek_penalty),
	CFQ_ATTR(slice_sync),
	CFQ_ATTR(slice_async),
	CFQ_ATTR(slice_async_rq),
	CFQ_ATTR(slice_idle),
	CFQ_ATTR(group_idle),
	CFQ_ATTR(low_latency),
	CFQ_ATTR(target_latency),
	__ATTR_NULL
};

static struct elevator_type iosched_cfq = {
	.ops = {
		.elevator_merge_fn =		cfq_merge,
		.elevator_merged_fn =		cfq_merged_request,
		.elevator_merge_req_fn =	cfq_merged_requests,
		.elevator_allow_merge_fn =	cfq_allow_merge,
		.elevator_bio_merged_fn =	cfq_bio_merged,
		.elevator_dispatch_fn =		cfq_dispatch_requests,
		.elevator_add_req_fn =		cfq_insert_request,
		.elevator_activate_req_fn =	cfq_activate_request,
		.elevator_deactivate_req_fn =	cfq_deactivate_request,
		.elevator_completed_req_fn =	cfq_completed_request,
		.elevator_init_icq_fn =		cfq_init_icq,
		.elevator_exit_icq_fn =		cfq_exit_icq,
		.elevator_set_req_fn =		cfq_set_request,
		.elevator_put_req_fn =		cfq_put_request,
		.elevator_may_queue_fn =	cfq_may_queue,
		.elevator_init_fn =		cfq_init_queue,
		.elevator_exit_fn =		cfq_exit_queue,
	},
	.icq_size	=	sizeof(struct cfq_io_cq),
	.icq_align	=	__alignof__(struct cfq_io_cq),
	.elevator_attrs =	cfq_attrs,
	.elevator_name	=	"cfq",
	.elevator_owner =	THIS_MODULE,
};
#ifdef CONFIG_CFQ_GROUP_IOSCHED
static struct blkcg_policy blkcg_policy_cfq = {
	.cftypes		= cfq_blkcg_files,

	.pd_init_fn		= cfq_pd_init,
	.pd_reset_stats_fn	= cfq_pd_reset_stats,
};
#endif
static int __init cfq_init(void)
{
	/*
	 * could be 0 on HZ < 1000 setups
	 */
	if (!cfq_slice_async)
		cfq_slice_async = 1;
	if (!cfq_slice_idle)
		cfq_slice_idle = 1;

#ifdef CONFIG_CFQ_GROUP_IOSCHED
	if (!cfq_group_idle)
		cfq_group_idle = 1;
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	ret = blkcg_policy_register(&blkcg_policy_cfq);
#endif

static void __exit cfq_exit(void)
{
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	blkcg_policy_unregister(&blkcg_policy_cfq);
#endif
	elv_unregister(&iosched_cfq);
}

module_init(cfq_init);
module_exit(cfq_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");