#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/slab.h>

#define MAX_KEY_LEN 100
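/* Test whether policy @pol has been activated (its bit set) on queue @q. */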
static bool blkcg_policy_enabled(struct request_queue *q,
				 const struct blkcg_policy *pol)
{
	return pol && test_bit(pol->plid, q->blkcg_pols);
}
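/*
 * blkg_free - give every registered policy a chance to tear down its
 * per-blkg data via pd_exit_fn(), then free the blkg itself.
 */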
static void blkg_free(struct blkcg_gq *blkg)
	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (pol && pol->pd_exit_fn)
			pol->pd_exit_fn(blkg);
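/*
 * blkg_alloc - allocate a blkg on @q's node and, for each policy enabled
 * on the queue, its per-policy data (pol->pd_size bytes), then invoke the
 * policies' pd_init_fn() callbacks.
 */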
	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);

	INIT_LIST_HEAD(&blkg->q_node);
	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (!blkcg_policy_enabled(q, pol))
			continue;

		pd = kzalloc_node(pol->pd_size, gfp_mask, q->node);

		if (blkcg_policy_enabled(blkg->q, pol))
			pol->pd_init_fn(blkg);
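/*
 * __blkg_lookup - look up the blkg for the (blkcg, q) pair; a candidate is
 * accepted only if blkg->q matches the requested queue.  blkg_lookup() is
 * the wrapper that callers use.
 */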
static struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,

	if (blkg && blkg->q == q)
		return blkg;

	if (blkg && blkg->q == q)
		return blkg;

	return __blkg_lookup(blkcg, q);
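/*
 * __blkg_lookup_create - create a blkg for the (blkcg, q) pair and link it
 * on both blkcg->blkg_list and q->blkg_list under blkcg->lock, holding a
 * reference on the blkcg css while doing so.
 */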
static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,

	blkg = __blkg_lookup(blkcg, q);

	if (!css_tryget(&blkcg->css)) {

	spin_lock(&blkcg->lock);

	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	list_add(&blkg->q_node, &q->blkg_list);

	spin_unlock(&blkcg->lock);

	css_put(&blkcg->css);
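/*
 * blkg_lookup_create - wrapper that refuses to create blkgs on a dead or
 * bypassing queue, returning an ERR_PTR instead.
 */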
		return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);

	return __blkg_lookup_create(blkcg, q, NULL);
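/*
 * blkg_destroy - unlink @blkg from its queue's and its blkcg's lists; both
 * the queue_lock and blkcg->lock are expected to be held.
 */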
static void blkg_destroy(struct blkcg_gq *blkg)

	struct blkcg *blkcg = blkg->blkcg;

	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);
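/*
 * blkg_destroy_all - destroy every blkg on a queue, taking each blkg's
 * blkcg->lock in turn, and clear the root request_list's blkg pointer.
 */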
		struct blkcg *blkcg = blkg->blkcg;

		spin_lock(&blkcg->lock);

		spin_unlock(&blkcg->lock);

	q->root_rl.blkg = NULL;
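/*
 * Releasing a blkg drops the css reference it holds on its blkcg and then
 * frees the blkg via RCU (blkg_rcu_free).
 */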
	css_put(&blkg->blkcg->css);

	call_rcu(&blkg->rcu_head, blkg_rcu_free);
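/*
 * Iterating a queue's request_lists: the queue's own root_rl is handled
 * first, then each blkg's rl in q->blkg_list order, skipping the root blkg
 * and stopping when the walk reaches the list head again.
 */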
	if (rl == &q->root_rl) {

	if (ent == &q->root_blkg->q_node)

	if (ent == &q->blkg_list)
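/*
 * blkcg_reset_stats - cgroupfs write handler for "reset_stats"; walks the
 * blkcg's blkgs under blkcg->lock and calls each enabled policy's
 * pd_reset_stats_fn().
 */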
static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
			     u64 val)

	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
	spin_lock_irq(&blkcg->lock);

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkcg_policy_enabled(blkg->q, pol) &&
			    pol->pd_reset_stats_fn)
				pol->pd_reset_stats_fn(blkg);

	spin_unlock_irq(&blkcg->lock);
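/* blkg_dev_name - return the bdi device name of the blkg's queue, if any. */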
static const char *blkg_dev_name(struct blkcg_gq *blkg)

	if (blkg->q->backing_dev_info.dev)
		return dev_name(blkg->q->backing_dev_info.dev);
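/*
 * blkcg_print_blkgs - run @prfill on the matching policy data of every blkg
 * that has @pol enabled, accumulating the returned values and optionally
 * printing a "Total" line at the end.
 */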
		       const struct blkcg_policy *pol, int data,

	spin_lock_irq(&blkcg->lock);

		if (blkcg_policy_enabled(blkg->q, pol))
			total += prfill(sf, blkg->pd[pol->plid], data);

	spin_unlock_irq(&blkcg->lock);

		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
	const char *dname = blkg_dev_name(pd->blkg);

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
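/*
 * __blkg_prfill_rwstat - print one "<dev> <Read|Write|Sync|Async> <count>"
 * line per rwstat category, followed by a per-device Total of reads plus
 * writes.
 */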
			 const struct blkg_rwstat *rwstat)

	static const char *rwstr[] = {
		[BLKG_RWSTAT_READ]	= "Read",
		[BLKG_RWSTAT_WRITE]	= "Write",
		[BLKG_RWSTAT_SYNC]	= "Sync",
		[BLKG_RWSTAT_ASYNC]	= "Async",
	};
	const char *dname = blkg_dev_name(pd->blkg);

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
			   (unsigned long long)rwstat->cnt[i]);

	v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE];
	seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);
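/*
 * blkg_conf_prep - parse a per-device config write of the form
 * "MAJ:MIN VALUE", look up the disk, and (with the queue_lock held) resolve
 * the blkg to operate on; blkg_conf_finish() later drops the lock again.
 */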
		   const char *input, struct blkg_conf_ctx *ctx)

	struct gendisk *disk;

	unsigned long long v;

	if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3)
		return -EINVAL;

	spin_lock_irq(disk->queue->queue_lock);

	if (blkcg_policy_enabled(disk->queue, pol))

	spin_unlock_irq(disk->queue->queue_lock);
		ret = restart_syscall();

	spin_unlock_irq(ctx->disk->queue->queue_lock);
		.name = "reset_stats",
		.write_u64 = blkcg_reset_stats,
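/*
 * blkcg_pre_destroy - called before the blkio cgroup is removed; destroys
 * the blkcg's remaining blkgs.  Each blkg's queue_lock is only trylock'ed
 * (queue_lock nests outside blkcg->lock), so on contention blkcg->lock is
 * dropped and retaken before retrying.
 */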
static int blkcg_pre_destroy(struct cgroup *cgroup)

	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);

	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {

		if (spin_trylock(q->queue_lock)) {

			spin_unlock(q->queue_lock);

			spin_unlock_irq(&blkcg->lock);

			spin_lock_irq(&blkcg->lock);

	spin_unlock_irq(&blkcg->lock);
static void blkcg_destroy(struct cgroup *cgroup)

	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
static struct cgroup_subsys_state *blkcg_create(struct cgroup *cgroup)

	struct cgroup *parent = cgroup->parent;
	spin_lock_irq(q->queue_lock);

	spin_unlock_irq(q->queue_lock);
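/*
 * blkcg_can_attach - iterate the tasks being attached and veto the move for
 * any task whose io_context is shared with another task (e.g. via CLONE_IO).
 */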
static int blkcg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)

	cgroup_taskset_for_each(task, cgrp, tset) {
	.create = blkcg_create,
	.can_attach = blkcg_can_attach,
	.pre_destroy = blkcg_pre_destroy,
	.destroy = blkcg_destroy,
	.subsys_id = blkio_subsys_id,

	.broken_hierarchy = true,
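/*
 * blkcg_activate_policy - enable @pol on @q.  Per-policy data for the
 * existing blkgs is preallocated with GFP_KERNEL outside the queue_lock,
 * then installed and initialized via pd_init_fn() with the queue_lock and
 * each blkg's blkcg->lock held.
 */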
			  const struct blkcg_policy *pol)

	if (blkcg_policy_enabled(q, pol))

	spin_lock_irq(q->queue_lock);

	blkg = __blkg_lookup_create(&blkcg_root, q, blkg);

	radix_tree_preload_end();

	q->root_rl.blkg = blkg;

	spin_unlock_irq(q->queue_lock);

		pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node);

	spin_lock_irq(q->queue_lock);

		if (WARN_ON(list_empty(&pds))) {

		list_del_init(&pd->alloc_node);

		spin_lock(&blkg->blkcg->lock);

		blkg->pd[pol->plid] = pd;
		pol->pd_init_fn(blkg);

		spin_unlock(&blkg->blkcg->lock);

	spin_unlock_irq(q->queue_lock);
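/*
 * blkcg_deactivate_policy - disable @pol on @q: under the queue_lock, walk
 * the queue's blkgs and, under each blkg's blkcg->lock, call pd_exit_fn()
 * and free/clear the blkg's per-policy data.
 */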
			    const struct blkcg_policy *pol)

	if (!blkcg_policy_enabled(q, pol))

	spin_lock_irq(q->queue_lock);

	if (bitmap_empty(q->blkcg_pols, BLKCG_MAX_POLS))

		spin_lock(&blkg->blkcg->lock);

			pol->pd_exit_fn(blkg);

		kfree(blkg->pd[pol->plid]);
		blkg->pd[pol->plid] = NULL;

		spin_unlock(&blkg->blkcg->lock);

	spin_unlock_irq(q->queue_lock);
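/*
 * blkcg_policy_register - find a free slot in the blkcg_policy[] table and
 * install @pol there, the slot index becoming the policy's plid.
 */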
	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (!blkcg_policy[i])
			break;
	if (i >= BLKCG_MAX_POLS)

	blkcg_policy[i] = pol;
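/*
 * blkcg_policy_unregister - drop @pol from the blkcg_policy[] table,
 * warning if the slot for its plid no longer points at @pol.
 */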
	if (WARN_ON(blkcg_policy[pol->plid] != pol))

	blkcg_policy[pol->plid] = NULL;