#include <linux/sched.h>

#define NICE_TO_PRIO(nice)      (MAX_RT_PRIO + (nice) + 20)
#define PRIO_TO_NICE(prio)      ((prio) - MAX_RT_PRIO - 20)
#define TASK_NICE(p)            PRIO_TO_NICE((p)->static_prio)

#define USER_PRIO(p)            ((p)-MAX_RT_PRIO)
#define TASK_USER_PRIO(p)       USER_PRIO((p)->static_prio)
#define MAX_USER_PRIO           (USER_PRIO(MAX_PRIO))
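/*
 * Worked example (assuming the usual MAX_RT_PRIO == 100 and MAX_PRIO == 140
 * from <linux/sched.h>):
 *
 *      NICE_TO_PRIO(-20) == 100 + (-20) + 20 == 100   (best non-RT prio)
 *      NICE_TO_PRIO(0)   == 120
 *      NICE_TO_PRIO(19)  == 139                       (worst prio)
 *      PRIO_TO_NICE(120) == 0
 *      USER_PRIO(120)    == 20, MAX_USER_PRIO == USER_PRIO(140) == 40
 */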
#define NS_TO_JIFFIES(TIME)     ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
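/*
 * Example (HZ is a build-time option; assuming HZ == 250 here):
 * NSEC_PER_SEC / HZ == 4000000, so NS_TO_JIFFIES(10000000) == 2.
 * This is plain integer division; any sub-jiffy remainder is dropped.
 */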
#define NICE_0_LOAD             SCHED_LOAD_SCALE
#define NICE_0_SHIFT            SCHED_LOAD_SHIFT
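/*
 * Note (assuming the usual SCHED_LOAD_SHIFT of 10): NICE_0_LOAD is
 * 1 << 10 == 1024, the load weight of a nice-0 task, matching
 * prio_to_weight[20] further down in this file.
 */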
#define RUNTIME_INF     ((u64)~0ULL)

static inline int rt_policy(int policy)
static inline int task_has_rt_policy(struct task_struct *p)
{
        return rt_policy(p->policy);
}
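/*
 * Sketch only (rt_policy()'s body is not part of this excerpt): it is
 * expected to report whether a policy is one of the real-time policies,
 * roughly:
 *
 *      return policy == SCHED_FIFO || policy == SCHED_RR;
 *
 * so task_has_rt_policy() is true exactly for SCHED_FIFO/SCHED_RR tasks.
 */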
#ifdef CONFIG_CGROUP_SCHED

#ifdef CONFIG_CFS_BANDWIDTH
        int idle, timer_active;
        struct hrtimer period_timer, slack_timer;

        int nr_periods, nr_throttled;

        struct cgroup_subsys_state css;

#ifdef CONFIG_FAIR_GROUP_SCHED
        unsigned long shares;

#ifdef CONFIG_RT_GROUP_SCHED

        struct task_group *parent;

#ifdef CONFIG_SCHED_AUTOGROUP
        struct autogroup *autogroup;

#ifdef CONFIG_FAIR_GROUP_SCHED
#define ROOT_TASK_GROUP_LOAD    NICE_0_LOAD

#define MIN_SHARES      (1UL <<  1)
#define MAX_SHARES      (1UL << 18)
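/*
 * Worked out: MIN_SHARES == 2 and MAX_SHARES == 262144, so a group's
 * cpu.shares setting is presumably clamped by sched_group_set_shares()
 * (declared below) to [2, 262144]; the default of NICE_0_LOAD (1024) sits
 * well inside that range.
 */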
typedef int (*tg_visitor)(struct task_group *, void *);
extern int walk_tg_tree_from(struct task_group *from,
                             tg_visitor down, tg_visitor up, void *data);
static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
{
        return walk_tg_tree_from(&root_task_group, down, up, data);
}
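/*
 * Illustrative use of the visitor interface (not from the original file;
 * count_tg is a hypothetical visitor). A non-zero return from the "down"
 * visitor is expected to abort the walk.
 */
#if 0
static int count_tg(struct task_group *tg, void *data)
{
        (*(int *)data)++;       /* visited one group */
        return 0;               /* keep walking */
}

/* usage: int n = 0; walk_tg_tree(count_tg, tg_nop, &n); */
#endif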
extern int tg_nop(struct task_group *tg, void *data);
extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
                              struct sched_entity *se, int cpu,
                              struct sched_entity *parent);
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);

extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
extern void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
                             struct sched_rt_entity *rt_se, int cpu,
                             struct sched_rt_entity *parent);
#ifdef CONFIG_SCHED_DEBUG
        unsigned int nr_spread_over;

#ifdef CONFIG_FAIR_GROUP_SCHED
        struct task_group *tg;

        unsigned long h_load;

        u64 load_stamp, load_last, load_unacc_exec_time;

        unsigned long load_contribution;

#ifdef CONFIG_CFS_BANDWIDTH
        s64 runtime_remaining;

        u64 throttled_timestamp;
        int throttled, throttle_count;

static inline int rt_bandwidth_enabled(void)
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
        unsigned long rt_nr_migratory;
        unsigned long rt_nr_total;

#ifdef CONFIG_RT_GROUP_SCHED
        unsigned long rt_nr_boosted;

        struct task_group *tg;

extern struct root_domain def_root_domain;

#define CPU_LOAD_IDX_MAX 5

        unsigned long nohz_flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
        unsigned long h_load_throttle;

#ifdef CONFIG_RT_GROUP_SCHED

        struct root_domain *rd;
        struct sched_domain *sd;

        unsigned long cpu_power;

        unsigned char idle_balance;
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
#ifdef CONFIG_PARAVIRT
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
        u64 prev_steal_time_rq;

#ifdef CONFIG_SCHED_HRTICK
        int hrtick_csd_pending;

#ifdef CONFIG_SCHEDSTATS
        /* latency stats */
        struct sched_info rq_sched_info;
        unsigned long long rq_cpu_time;

        /* sys_sched_yield() stats */
        unsigned int yld_count;

        /* schedule() stats */
        unsigned int sched_count;
        unsigned int sched_goidle;

        /* try_to_wake_up() stats */
        unsigned int ttwu_count;
        unsigned int ttwu_local;
static inline int cpu_of(struct rq *rq)

#define cpu_rq(cpu)             (&per_cpu(runqueues, (cpu)))
#define this_rq()               (&__get_cpu_var(runqueues))
#define task_rq(p)              cpu_rq(task_cpu(p))
#define cpu_curr(cpu)           (cpu_rq(cpu)->curr)
#define raw_rq()                (&__raw_get_cpu_var(runqueues))
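/*
 * Note (illustrative): on the local CPU, this_rq() and
 * cpu_rq(smp_processor_id()) name the same per-cpu runqueue, and task_rq(p)
 * is simply cpu_rq() applied to task_cpu(p).
 */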
#define rcu_dereference_check_sched_domain(p) \
        rcu_dereference_check((p), \
                              lockdep_is_held(&sched_domains_mutex))

#define for_each_domain(cpu, __sd) \
        for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
             __sd; __sd = __sd->parent)

#define for_each_lower_domain(sd) for (; sd; sd = sd->child)
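/*
 * Illustrative walk over a CPU's domain hierarchy (not from the original
 * file). The ->sd pointer is RCU-protected, hence the rcu_read_lock() here
 * and the rcu_dereference_check() in the macro above.
 */
#if 0
        struct sched_domain *sd;

        rcu_read_lock();
        for_each_domain(cpu, sd) {
                if (sd->flags & SD_LOAD_BALANCE)
                        ;       /* this level takes part in load balancing */
        }
        rcu_read_unlock();
#endif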
/* Return the highest-level (widest) sched_domain of @cpu that has @flag set. */
static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
{
        struct sched_domain *sd, *hsd = NULL;

        for_each_domain(cpu, sd) {
                if (!(sd->flags & flag))
                        break;
                hsd = sd;
        }

        return hsd;
}
extern int group_balance_cpu(struct sched_group *sg);

#ifdef CONFIG_CGROUP_SCHED
static inline struct task_group *task_group(struct task_struct *p)
{
        return p->sched_task_group;
}
/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
        struct task_group *tg = task_group(p);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
        p->se.cfs_rq = tg->cfs_rq[cpu];
        p->se.parent = tg->se[cpu];
#endif

#ifdef CONFIG_RT_GROUP_SCHED
        p->rt.rt_rq  = tg->rt_rq[cpu];
        p->rt.parent = tg->rt_se[cpu];
#endif
}
static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }

static inline struct task_group *task_group(struct task_struct *p)

static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
#ifdef CONFIG_SCHED_DEBUG
# define const_debug __read_mostly
#else
# define const_debug const
#endif
#define SCHED_FEAT(name, enabled)       \
        __SCHED_FEAT_##name ,

#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
        return static_key_true(key);

        return static_key_false(key);
#define SCHED_FEAT(name, enabled)                                       \
static __always_inline bool static_branch_##name(struct static_key *key) \
{                                                                       \
        return static_branch__##enabled(key);                           \
}
#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
#else
#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
#endif
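/*
 * Either way, callers test a feature the same way, e.g. (START_DEBIT is one
 * of the entries generated from features.h, roughly as the fair class uses
 * it when placing a new entity):
 *
 *      if (sched_feat(START_DEBIT))
 *              vruntime += sched_vslice(cfs_rq, se);
 *
 * With jump labels the test compiles to a static branch; without them it is
 * a plain bit test on sysctl_sched_features.
 */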
static inline u64 global_rt_period(void)

static inline u64 global_rt_runtime(void)

        return rq->curr == p;

        return task_current(rq, p);
#ifndef prepare_arch_switch
# define prepare_arch_switch(next)      do { } while (0)
#endif
#ifndef finish_arch_switch
# define finish_arch_switch(prev)       do { } while (0)
#endif
#ifndef finish_arch_post_lock_switch
# define finish_arch_post_lock_switch() do { } while (0)
#endif

#ifndef __ARCH_WANT_UNLOCKED_CTXSW
#ifdef CONFIG_DEBUG_SPINLOCK

static inline void update_load_add(struct load_weight *lw, unsigned long inc)

static inline void update_load_sub(struct load_weight *lw, unsigned long dec)

static inline void update_load_set(struct load_weight *lw, unsigned long w)
#define WEIGHT_IDLEPRIO                3
#define WMULT_IDLEPRIO         1431655765
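/*
 * Sanity check: 3 * 1431655765 == 4294967295, i.e. about 2^32, consistent
 * with the prio_to_wmult[] table below where each inverse weight is
 * approximately 2^32 / weight (e.g. 2^32 / 1024 == 4194304 for nice 0).
 */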
static const int prio_to_weight[40] = {
 /* -20 */     88761,     71755,     56483,     46273,     36291,
 /* -15 */     29154,     23254,     18705,     14949,     11916,
 /* -10 */      9548,      7620,      6100,      4904,      3906,
 /*  -5 */      3121,      2501,      1991,      1586,      1277,
 /*   0 */      1024,       820,       655,       526,       423,
 /*   5 */       335,       272,       215,       172,       137,
 /*  10 */       110,        87,        70,        56,        45,
 /*  15 */        36,        29,        23,        18,        15,
};
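/*
 * Worked example from the table: each nice step scales the weight by about
 * 1.25x. A nice-0 task (weight 1024) competing with a nice-5 task (weight
 * 335) gets roughly 1024 / (1024 + 335) ~= 75% of the CPU, the nice-5 task
 * the remaining ~25%.
 */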
static const u32 prio_to_wmult[40] = {
 /* -20 */     48388,     59856,     76040,     92818,    118348,
 /* -15 */    147320,    184698,    229616,    287308,    360437,
 /* -10 */    449829,    563644,    704093,    875809,   1099582,
 /*  -5 */   1376151,   1717300,   2157191,   2708050,   3363326,
 /*   0 */   4194304,   5237765,   6557202,   8165337,  10153587,
 /*   5 */  12820798,  15790321,  19976592,  24970740,  31350126,
 /*  10 */  39045157,  49367440,  61356676,  76695844,  95443717,
 /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
};
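/*
 * Rough sketch of how the inverse weights are meant to be used (assuming
 * the usual delta-scaling helper in the fair class): scaling a runtime
 * delta by weight/lw->weight is done as (delta * weight * inv_weight) >> 32,
 * which avoids a 64-bit division. For example, charging NICE_0_LOAD against
 * a nice-5 weight: (1024 * 12820798) >> 32 == 3, matching 1024 / 335 ~= 3.06.
 */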
#define sched_class_highest (&stop_sched_class)
#define for_each_class(class) \
   for (class = sched_class_highest; class; class = class->next)
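/*
 * Illustrative only (not from the original file): the core pick-next loop
 * is expected to walk the classes in this fixed order (stop, rt, fair,
 * idle) and take the first task offered.
 */
#if 0
        const struct sched_class *class;
        struct task_struct *p;

        for_each_class(class) {
                p = class->pick_next_task(rq);
                if (p)
                        return p;
        }
#endif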
extern void trigger_load_balance(struct rq *rq, int cpu);
extern void idle_balance(int this_cpu, struct rq *this_rq);

static inline void idle_balance(int cpu, struct rq *rq)

#ifdef CONFIG_CGROUP_CPUACCT

        struct cgroup_subsys_state css;

extern struct cgroup_subsys cpuacct_subsys;
extern struct cpuacct root_cpuacct;
static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp)
{
        return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id),
                            struct cpuacct, css);
}

static inline struct cpuacct *task_ca(struct task_struct *tsk)
{
        return container_of(task_subsys_state(tsk, cpuacct_subsys_id),
                            struct cpuacct, css);
}
static inline struct cpuacct *parent_ca(struct cpuacct *ca)
{
        if (!ca || !ca->css.cgroup->parent)
                return NULL;
        return cgroup_ca(ca->css.cgroup->parent);
}
static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
#ifdef CONFIG_PARAVIRT
static inline u64 steal_ticks(u64 steal)
{
        return __iter_div_u64_rem(steal, TICK_NSEC, &steal);
}
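/*
 * Example (assuming HZ == 1000, so TICK_NSEC is roughly 1000000):
 * steal_ticks(2500000) == 2 whole ticks. __iter_div_u64_rem() writes the
 * sub-tick remainder back into the local 'steal', which is then discarded.
 */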
static inline void inc_nr_running(struct rq *rq)

static inline void dec_nr_running(struct rq *rq)

static inline u64 sched_avg_period(void)

#ifdef CONFIG_SCHED_HRTICK

static inline int hrtick_enabled(struct rq *rq)

        return hrtimer_is_hres_active(&rq->hrtick_timer);

void hrtick_start(struct rq *rq, u64 delay);

static inline int hrtick_enabled(struct rq *rq)

extern void sched_avg_update(struct rq *rq);
static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
        rq->rt_avg += rt_delta;
        sched_avg_update(rq);
}
static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
static inline void sched_avg_update(struct rq *rq) { }

#ifdef CONFIG_PREEMPT

static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);

static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)

        double_rq_lock(this_rq, busiest);

static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)

        return _double_lock_balance(this_rq, busiest);
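/*
 * Sketch of the convention (assuming the usual implementation, most of which
 * is elided here): when two runqueue locks must be held at once they are
 * taken in pointer order (lower rq address first) so that concurrent
 * double-lockers cannot deadlock; _double_lock_balance() returns non-zero
 * when this_rq->lock had to be dropped and retaken, telling the caller to
 * re-validate any state derived under the old lock.
 */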
static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)

static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)

static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)

static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)

static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)

enum rq_nohz_flag_bits {

#define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags)

#ifdef CONFIG_IRQ_TIME_ACCOUNTING

#ifndef CONFIG_64BIT

static inline void irq_time_write_begin(void)

static inline void irq_time_write_end(void)
static inline u64 irq_time_read(int cpu)
{
        u64 irq_time;
        unsigned seq;

        /* On 32-bit, the two u64 counters cannot be read atomically: retry. */
        do {
                seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
                irq_time = per_cpu(cpu_softirq_time, cpu) +
                           per_cpu(cpu_hardirq_time, cpu);
        } while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));

        return irq_time;
}
static inline void irq_time_write_begin(void)

static inline void irq_time_write_end(void)
static inline u64 irq_time_read(int cpu)
{
        return per_cpu(cpu_softirq_time, cpu) +
               per_cpu(cpu_hardirq_time, cpu);
}