#define THROTL_IOPS_MAX		UINT_MAX

/* CFQ weight bounds; kept here because blkcg carries ->cfq_weight */
#define CFQ_WEIGHT_MIN		10
#define CFQ_WEIGHT_MAX		1000
#define CFQ_WEIGHT_DEFAULT	500
#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};
struct blkcg {
	struct cgroup_subsys_state	css;
	/* ... */
	unsigned int			cfq_weight;	/* belongs to cfq */
};

/* association between a blkcg and a request_queue */
struct blkcg_gq {
	/* ... */
	struct request_list		rl;	/* request_list for this blkg-q pair */
	/* ... */
};
typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg);
struct blkcg_policy {
	/* ... */
	struct cftype			*cftypes;	/* cgroupfs files for the policy */

	/* operations */
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_exit_pd_fn		*pd_exit_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
};
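/*
 * Illustrative sketch, not part of this header: how a blkcg policy might
 * fill in struct blkcg_policy.  The "example_*" names are hypothetical;
 * real policies (cfq, blk-throttle) supply their own callbacks and cftypes.
 */
static void example_pd_init(struct blkcg_gq *blkg)
{
	/* set up this policy's per-blkg private data */
}

static struct blkcg_policy example_policy = {
	.cftypes	   = NULL,		/* cgroupfs files, if any */
	.pd_init_fn	   = example_pd_init,
	.pd_exit_fn	   = NULL,		/* optional teardown */
	.pd_reset_stats_fn = NULL,		/* optional stat reset */
};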
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
struct blkg_conf_ctx {
	struct gendisk		*disk;
	/* ... */
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx);
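/*
 * Illustrative sketch, not part of this header: the usual pattern around
 * blkg_conf_prep() when parsing a "MAJ:MIN value" string written to a
 * cgroup file.  blkg_conf_finish() and the blkg resolved inside @ctx are
 * assumed from the rest of this API; "example_set_limit" is hypothetical.
 */
static int example_set_limit(struct blkcg *blkcg,
			     const struct blkcg_policy *pol, const char *input)
{
	struct blkg_conf_ctx ctx;
	int ret;

	ret = blkg_conf_prep(blkcg, pol, input, &ctx);
	if (ret)
		return ret;
	/* ... apply the parsed value to the blkg resolved in @ctx ... */
	blkg_conf_finish(&ctx);	/* drops the references blkg_conf_prep() took */
	return 0;
}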
static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkcg, css);
}
static inline struct blkcg *task_blkcg(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkcg, css);
}
static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_css)
		return container_of(bio->bi_css, struct blkcg, css);
	return task_blkcg(current);
}
/* get a policy's private data from @blkg */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

/* get the blkg owning @pd */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}
/* format @blkg's cgroup path into @buf */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	int ret;

	rcu_read_lock();
	ret = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
	rcu_read_unlock();
	if (ret)
		strncpy(buf, "<unavailable>", buflen);
	return ret;
}
/* take a reference on @blkg; caller holds queue_lock and an existing ref */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	/* ... */
}

/* drop a reference on @blkg; caller holds queue_lock */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	/* ... */
}
/* pick the request_list to use for @bio on @q, taking a blkg ref if needed */
static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio)
{
	struct blkcg *blkcg;

	/* ... */
	blkcg = bio_blkcg(bio);
	/* ... look up or create the matching blkg and return &blkg->rl,
	 * falling back to &q->root_rl ... */
}
/* put the reference taken by blk_get_rl() */
static inline void blk_put_rl(struct request_list *rl)
{
	/* root_rl may not have blkg set */
	if (rl->blkg && rl->blkg->blkcg != &blkcg_root)
		blkg_put(rl->blkg);
}
/* associate @rq with @rl so blk_rq_rl() can find it at free time */
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
{
	rq->rl = rl;
}
/* return the request_list @rq came from */
static inline struct request_list *blk_rq_rl(struct request *rq)
{
	return rq->rl;
}
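/*
 * Illustrative sketch, not part of this header: how an allocation path
 * pairs these helpers.  "example_get_request_from" stands in for the real
 * request allocator and is hypothetical.
 */
static struct request *example_get_request_from(struct request_list *rl);

static struct request *example_alloc_request(struct request_queue *q,
					     struct bio *bio)
{
	struct request_list *rl = blk_get_rl(q, bio);	/* may take a blkg ref */
	struct request *rq = example_get_request_from(rl);

	if (!rq) {
		blk_put_rl(rl);		/* allocation failed, drop the ref */
		return NULL;
	}
	blk_rq_set_rl(rq, rl);	/* so the free path can blk_put_rl(blk_rq_rl(rq)) */
	return rq;
}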
#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))
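/*
 * Illustrative sketch, not part of this header: walking every request_list
 * of a queue under queue_lock, e.g. to wake up waiters.
 */
static void example_wake_all_rls(struct request_queue *q)
{
	struct request_list *rl;

	blk_queue_for_each_rl(rl, q) {
		/* ... operate on @rl; the root_rl is visited first ... */
	}
}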
/* add @val to @stat; writers must be serialized by the caller */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	u64_stats_update_begin(&stat->syncp);
	stat->cnt += val;
	u64_stats_update_end(&stat->syncp);
}
/* read @stat without locking; the seqcount loop guarantees 64-bit atomicity */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	unsigned int start;
	uint64_t v;

	do {
		start = u64_stats_fetch_begin(&stat->syncp);
		v = stat->cnt;
	} while (u64_stats_fetch_retry(&stat->syncp, start));

	return v;
}
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	stat->cnt = 0;
}
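/*
 * Illustrative sketch, not part of this header: a serialized writer bumps
 * a blkg_stat while readers sample it lock-free from any context.
 * "nr_completed" is a hypothetical counter a policy might keep in its
 * per-blkg private data.
 */
static void example_account_completion(struct blkg_stat *nr_completed)
{
	blkg_stat_add(nr_completed, 1);		/* writer side, serialized */
}

static uint64_t example_report_completions(struct blkg_stat *nr_completed)
{
	return blkg_stat_read(nr_completed);	/* safe 64-bit read anywhere */
}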
/*
 * Add @val to @rwstat, bucketing by the REQ_WRITE and REQ_SYNC bits in @rw.
 * The caller is responsible for serializing writers.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   int rw, uint64_t val)
{
	u64_stats_update_begin(&rwstat->syncp);

	if (rw & REQ_WRITE)
		rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_READ] += val;
	if (rw & REQ_SYNC)
		rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;

	u64_stats_update_end(&rwstat->syncp);
}
/* snapshot @rwstat without locking */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	unsigned int start;
	struct blkg_rwstat tmp;

	do {
		start = u64_stats_fetch_begin(&rwstat->syncp);
		tmp = *rwstat;
	} while (u64_stats_fetch_retry(&rwstat->syncp, start));

	return tmp;
}
/* total of @rwstat regardless of direction */
static inline uint64_t blkg_rwstat_sum(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
}
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
}
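/*
 * Illustrative sketch, not part of this header: accounting a bio into a
 * blkg_rwstat and reporting the grand total.  "serviced_bytes" is a
 * hypothetical counter; bio->bi_rw carries the REQ_WRITE/REQ_SYNC bits
 * that blkg_rwstat_add() tests.
 */
static void example_account_bio(struct blkg_rwstat *serviced_bytes,
				struct bio *bio)
{
	blkg_rwstat_add(serviced_bytes, bio->bi_rw, bio->bi_size);
}

static uint64_t example_total_bytes(struct blkg_rwstat *serviced_bytes)
{
	return blkg_rwstat_sum(serviced_bytes);	/* read + write combined */
}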
#else	/* CONFIG_BLK_CGROUP */

static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup) { return NULL; }
static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio) { return &q->root_rl; }
static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }

/* with blk-cgroup off there is only the root request_list */
#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLK_CGROUP */