#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blktrace_api.h>
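/* Dispatch quanta: at most throtl_grp_quantum bios are taken from one group,
 * and at most throtl_quantum bios in total, in a single dispatch round. */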
static int throtl_grp_quantum = 8;
static int throtl_quantum = 32;
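/* Throttling accounting is done over a 100ms slice (HZ/10); the slice is
 * renewed once it has been used up. */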
static unsigned long throtl_slice = HZ/10;	/* 100 ms */

static void throtl_schedule_delayed_work(struct throtl_data *td,
					 unsigned long delay);

#define THROTL_RB_ROOT	(struct throtl_rb_root) { .rb = RB_ROOT, .left = NULL, \
			.count = 0, .min_disptime = 0}

#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)

static void tg_stats_alloc_fn(struct work_struct *);
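/* Helpers converting between the generic blkcg objects (blkcg_gq and
 * blkg_policy_data) and the throttling policy's per-group data, throtl_grp. */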
        return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));

        return pd_to_blkg(&tg->pd);

        return blkg_to_tg(td->queue->root_blkg);
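/* Generate throtl_mark_tg_<name>(), throtl_clear_tg_<name>() and
 * throtl_tg_<name>() helpers that set, clear and test the
 * THROTL_TG_FLAG_<name> bit in tg->flags. */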
#define THROTL_TG_FNS(name)						\
static inline void throtl_mark_tg_##name(struct throtl_grp *tg)	\
{									\
	(tg)->flags |= (1 << THROTL_TG_FLAG_##name);			\
}									\
static inline void throtl_clear_tg_##name(struct throtl_grp *tg)	\
{									\
	(tg)->flags &= ~(1 << THROTL_TG_FLAG_##name);			\
}									\
static inline int throtl_tg_##name(const struct throtl_grp *tg)	\
{									\
	return ((tg)->flags & (1 << THROTL_TG_FLAG_##name)) != 0;	\
}
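/* Tracing helpers: emit blktrace messages for the throttle policy, optionally
 * prefixed with the group's cgroup path. */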
#define throtl_log_tg(td, tg, fmt, args...)	do {			\
	char __pbuf[128];						\
									\
	blkg_path(tg_to_blkg(tg), __pbuf, sizeof(__pbuf));		\
	blk_add_trace_msg((td)->queue, "throtl %s " fmt, __pbuf, ##args); \
} while (0)

#define throtl_log(td, fmt, args...)	\
	blk_add_trace_msg((td)->queue, "throtl " fmt, ##args)

static inline unsigned int total_nr_queued(struct throtl_data *td)
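/* tg_stats_alloc_fn(): worker that allocates per-cpu stats for groups queued
 * on tg_stats_alloc_list; per-cpu allocation may sleep, so it is deferred to
 * this work item rather than done where the group is set up. */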
        spin_lock_irq(&tg_stats_alloc_lock);

        if (!list_empty(&tg_stats_alloc_list)) {

        empty = list_empty(&tg_stats_alloc_list);
        spin_unlock_irq(&tg_stats_alloc_lock);

static void throtl_pd_init(struct blkcg_gq *blkg)

        spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);

static void throtl_pd_exit(struct blkcg_gq *blkg)

        spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);

static void throtl_pd_reset_stats(struct blkcg_gq *blkg)
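/* Group lookup on the submission path: fall back to the root group when no
 * group exists, and only try to create one while the queue is still alive. */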
        return td_root_tg(td);

        tg = blkg_to_tg(blkg);
        else if (!blk_queue_dead(q))
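/* Active groups sit on an rbtree service tree keyed by disptime; the cached
 * leftmost node is the group that is due to dispatch next. */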
        rb_erase_init(n, &root->rb);

        tg = throtl_rb_first(st);

        while (*node != NULL) {

        rb_link_node(&tg->rb_node, parent, node);

        tg_service_tree_add(st, tg);
        throtl_mark_tg_on_rr(tg);

        if (!throtl_tg_on_rr(tg))
                __throtl_enqueue_tg(td, tg);

        throtl_clear_tg_on_rr(tg);

        if (throtl_tg_on_rr(tg))
                __throtl_dequeue_tg(td, tg);
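/* Re-arm the dispatch worker: run immediately if the earliest disptime has
 * already passed, otherwise sleep until it arrives. */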
static void throtl_schedule_next_dispatch(struct throtl_data *td)

        if (!total_nr_queued(td))

        update_min_dispatch_time(st);

        throtl_schedule_delayed_work(td, 0);

        throtl_schedule_delayed_work(td, (st->min_disptime - jiffies));

        throtl_log_tg(td, tg, "[%c] new slice start=%lu end=%lu jiffies=%lu",

static inline void throtl_set_slice_end(struct throtl_data *td,
                struct throtl_grp *tg, bool rw, unsigned long jiffy_end)

static inline void throtl_extend_slice(struct throtl_data *td,
                struct throtl_grp *tg, bool rw, unsigned long jiffy_end)

        throtl_log_tg(td, tg, "[%c] extend slice start=%lu end=%lu jiffies=%lu",
        unsigned long nr_slices, time_elapsed, io_trim;

        if (throtl_slice_used(td, tg, rw))

        throtl_set_slice_end(td, tg, rw, jiffies + throtl_slice);

        nr_slices = time_elapsed / throtl_slice;

        tmp = tg->bps[rw] * throtl_slice * nr_slices;

        io_trim = (tg->iops[rw] * throtl_slice * nr_slices)/HZ;

        if (!bytes_trim && !io_trim)

        if (tg->io_disp[rw] >= io_trim)

        throtl_log_tg(td, tg, "[%c] trim slice nr=%lu bytes=%llu io=%lu"
                        " start=%lu end=%lu jiffies=%lu",
                        rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
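/* tg_with_in_iops_limit(): would one more IO stay within the iops budget of
 * the current slice?  If not, *wait is set to the time until it would fit. */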
static bool tg_with_in_iops_limit(struct throtl_data *td, struct throtl_grp *tg,
                struct bio *bio, unsigned long *wait)

        bool rw = bio_data_dir(bio);
        unsigned int io_allowed;
        unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;

        jiffy_elapsed_rnd = throtl_slice;

        jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

        tmp = (u64)tg->iops[rw] * jiffy_elapsed_rnd;

        if (tg->io_disp[rw] + 1 <= io_allowed) {

        if (jiffy_wait > jiffy_elapsed)
                jiffy_wait = jiffy_wait - jiffy_elapsed;
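/* tg_with_in_bps_limit(): same check for the bytes-per-second budget; extra
 * bytes beyond the allowance are converted into a wait via the group's bps. */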
static bool tg_with_in_bps_limit(struct throtl_data *td, struct throtl_grp *tg,
                struct bio *bio, unsigned long *wait)

        bool rw = bio_data_dir(bio);
        u64 bytes_allowed, extra_bytes, tmp;
        unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;

        jiffy_elapsed_rnd = throtl_slice;

        jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

        tmp = tg->bps[rw] * jiffy_elapsed_rnd;

        if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {

        extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
        jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);

        jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
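/* A limit of -1 means "unconfigured": with neither bps nor iops set for this
 * direction the group has no rule and the bio can bypass throttling. */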
static bool tg_no_rule_group(struct throtl_grp *tg, bool rw) {
        if (tg->bps[rw] == -1 && tg->iops[rw] == -1)
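/* tg_may_dispatch(): can this bio be issued right now?  If not, *wait (when
 * non-NULL) receives the delay needed to satisfy both bps and iops limits. */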
static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
                struct bio *bio, unsigned long *wait)

        bool rw = bio_data_dir(bio);
        unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;

        if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {

        if (throtl_slice_used(td, tg, rw))
                throtl_start_new_slice(td, tg, rw);

        throtl_extend_slice(td, tg, rw, jiffies + throtl_slice);

        if (tg_with_in_bps_limit(td, tg, bio, &bps_wait)
            && tg_with_in_iops_limit(td, tg, bio, &iops_wait)) {

        max_wait = max(bps_wait, iops_wait);

        throtl_extend_slice(td, tg, rw, jiffies + max_wait);

        blkg_rwstat_add(&stats_cpu->serviced, rw, 1);

static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)

        bool rw = bio_data_dir(bio);

        throtl_update_dispatch_stats(tg_to_blkg(tg), bio->bi_size, bio->bi_rw);
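/* throtl_add_bio_tg(): queue the bio on the group and take a blkg reference
 * for as long as a bio is queued on the group. */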
        bool rw = bio_data_dir(bio);

        blkg_get(tg_to_blkg(tg));
        throtl_enqueue_tg(td, tg);

        unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;

        tg_may_dispatch(td, tg, bio, &read_wait);
        tg_may_dispatch(td, tg, bio, &write_wait);

        min_wait = min(read_wait, write_wait);

        throtl_dequeue_tg(td, tg);
        throtl_enqueue_tg(td, tg);

        blkg_put(tg_to_blkg(tg));

        throtl_charge_bio(tg, bio);
        bio_list_add(bl, bio);

        throtl_trim_slice(td, tg, rw);
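/* throtl_dispatch_tg(): within one group, dispatch roughly 75% reads and 25%
 * writes per quantum so neither direction starves the other. */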
        unsigned int nr_reads = 0, nr_writes = 0;
        unsigned int max_nr_reads = throtl_grp_quantum*3/4;
        unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;

               && tg_may_dispatch(td, tg, bio, NULL)) {

                tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);

                if (nr_reads >= max_nr_reads)

               && tg_may_dispatch(td, tg, bio, NULL)) {

                tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);

                if (nr_writes >= max_nr_writes)

        return nr_reads + nr_writes;
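/* throtl_select_dispatch(): walk the service tree from the leftmost group,
 * dispatching groups whose disptime has arrived, up to throtl_quantum bios. */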
        unsigned int nr_disp = 0;

        tg = throtl_rb_first(st);

        throtl_dequeue_tg(td, tg);

        nr_disp += throtl_dispatch_tg(td, tg, bl);

        tg_update_disptime(td, tg);
        throtl_enqueue_tg(td, tg);

        if (nr_disp >= throtl_quantum)
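/* Limit changes written through cgroupfs are applied here: restart both the
 * read and write slices so the new limits take effect immediately. */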
static void throtl_process_limit_change(struct throtl_data *td)

        throtl_start_new_slice(td, tg, 0);
        throtl_start_new_slice(td, tg, 1);

        if (throtl_tg_on_rr(tg))
                tg_update_disptime(td, tg);
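/* Dispatch worker: under queue_lock, collect dispatchable bios onto an
 * on-stack bio list, then drop the lock and issue them. */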
        unsigned int nr_disp = 0;

        spin_lock_irq(q->queue_lock);

        throtl_process_limit_change(td);

        if (!total_nr_queued(td))

        bio_list_init(&bio_list_on_stack);

        throtl_log(td, "dispatch nr_queued=%u read=%u write=%u",

        nr_disp = throtl_select_dispatch(td, &bio_list_on_stack);

        throtl_schedule_next_dispatch(td);

        spin_unlock_irq(q->queue_lock);

        while ((bio = bio_list_pop(&bio_list_on_stack)))

        throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
        struct blkg_rwstat rwstat = { }, tmp;

        tmp = blkg_rwstat_read((void *)sc + off);
        for (i = 0; i < BLKG_RWSTAT_NR; i++)
                rwstat.cnt[i] += tmp.cnt[i];

static int tg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,

        struct blkcg *blkcg = cgroup_to_blkcg(cgrp);

        u64 v = *(u64 *)((void *)tg + off);

        unsigned int v = *(unsigned int *)((void *)tg + off);

static int tg_print_conf_u64(struct cgroup *cgrp, struct cftype *cft,

                          &blkcg_policy_throtl, cft->private, false);

static int tg_print_conf_uint(struct cgroup *cgrp, struct cftype *cft,

                          &blkcg_policy_throtl, cft->private, false);
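/* tg_set_conf(): parse a per-device value written to a cgroup file into a
 * blkg_conf_ctx, store it at the cftype's offset inside the group, and kick
 * the dispatch worker so queued bios are re-evaluated against the new limit. */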
static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,

        struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
        struct blkg_conf_ctx ctx;

        tg = blkg_to_tg(ctx.blkg);
        td = ctx.blkg->q->td;

        *(u64 *)((void *)tg + cft->private) = ctx.v;

        *(unsigned int *)((void *)tg + cft->private) = ctx.v;

        throtl_schedule_delayed_work(td, 0);

static int tg_set_conf_u64(struct cgroup *cgrp, struct cftype *cft,

        return tg_set_conf(cgrp, cft, buf, true);

static int tg_set_conf_uint(struct cgroup *cgrp, struct cftype *cft,

        return tg_set_conf(cgrp, cft, buf, false);
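/* Control files exposed in each blkio cgroup directory: per-device read/write
 * bps and iops limits, plus read-only service statistics. */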
static struct cftype throtl_files[] = {
	{
		.name = "throttle.read_bps_device",
		.read_seq_string = tg_print_conf_u64,
		.write_string = tg_set_conf_u64,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_bps_device",
		.read_seq_string = tg_print_conf_u64,
		.write_string = tg_set_conf_u64,
		.max_write_len = 256,
	},
	{
		.name = "throttle.read_iops_device",
		.read_seq_string = tg_print_conf_uint,
		.write_string = tg_set_conf_uint,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_iops_device",
		.read_seq_string = tg_print_conf_uint,
		.write_string = tg_set_conf_uint,
		.max_write_len = 256,
	},
	{
		.name = "throttle.io_service_bytes",
		.read_seq_string = tg_print_cpu_rwstat,
	},
	{
		.name = "throttle.io_serviced",
		.read_seq_string = tg_print_cpu_rwstat,
	},
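/* Policy callbacks wired into the blkcg core: cgroup files plus per-group
 * init, exit and stat-reset hooks. */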
	.cftypes	= throtl_files,
	.pd_init_fn	= throtl_pd_init,
	.pd_exit_fn	= throtl_pd_exit,
	.pd_reset_stats_fn = throtl_pd_reset_stats,
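/* Main hook on the bio submission path (blk_throtl_bio): bios that fit within
 * the current slice's budget are charged and passed through; the rest are
 * queued on their group and reported back as throttled. */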
        bool rw = bio_data_dir(bio), update_disptime = true;
        struct blkcg *blkcg;
        bool throttled = false;

        blkcg = bio_blkcg(bio);
        tg = throtl_lookup_tg(td, blkcg);

        if (tg_no_rule_group(tg, rw)) {
                throtl_update_dispatch_stats(tg_to_blkg(tg), bio->bi_size, bio->bi_rw);
                goto out_unlock_rcu;

        spin_lock_irq(q->queue_lock);
        tg = throtl_lookup_create_tg(td, blkcg);

        update_disptime = false;

        if (tg_may_dispatch(td, tg, bio, NULL)) {
                throtl_charge_bio(tg, bio);

        throtl_trim_slice(td, tg, rw);

                        " iodisp=%u iops=%u queued=%d/%d",
                        rw == READ ? 'R' : 'W',

        bio_associate_current(bio);
        throtl_add_bio_tg(q->td, tg, bio);

        if (update_disptime) {
                tg_update_disptime(td, tg);
                throtl_schedule_next_dispatch(td);

        spin_unlock_irq(q->queue_lock);
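/* Queue teardown: pull every queued bio out of every group on the service
 * tree and resubmit them without throttling while the queue drains. */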
        queue_lockdep_assert_held(q);

        while ((tg = throtl_rb_first(st))) {
                throtl_dequeue_tg(td, tg);

                tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl);

                tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl);

        spin_unlock_irq(q->queue_lock);

        while ((bio = bio_list_pop(&bl)))

        spin_lock_irq(q->queue_lock);

        td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);

        throtl_shutdown_wq(q);
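/* Module init: dispatch work runs on a dedicated kthrotld workqueue; failure
 * to create it at boot is treated as fatal. */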
static int __init throtl_init(void)

        if (!kthrotld_workqueue)
                panic("Failed to create kthrotld\n");