8 #include <linux/slab.h>
23 now = hrtimer_cb_get_time(timer);
29 idle = do_sched_rt_period_timer(rt_b, overrun);
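/*
 * Lines 23/29 are the core of sched_rt_period_timer(), the hrtimer
 * callback behind rt_bandwidth: it forwards the timer by rt_period and
 * calls do_sched_rt_period_timer() once per elapsed period, letting the
 * timer lapse (HRTIMER_NORESTART) once the handler reports idle.
 */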
47 static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
67 INIT_LIST_HEAD(array->queue + i);
73 #if defined CONFIG_SMP
76 rt_rq->rt_nr_migratory = 0;
77 rt_rq->overloaded = 0;
78 plist_head_init(&rt_rq->pushable_tasks);
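/*
 * On SMP every rt_rq also tracks how many of its tasks may migrate
 * (rt_nr_migratory), whether this rq is RT-overloaded, and keeps a
 * priority-sorted plist of candidates for pushing to other CPUs
 * (pushable_tasks).
 */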
87 #ifdef CONFIG_RT_GROUP_SCHED
88 static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
93 #define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
97 #ifdef CONFIG_SCHED_DEBUG
103 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
118 destroy_rt_bandwidth(&tg->rt_bandwidth);
131 void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
138 rt_rq->rt_nr_boosted = 0;
142 tg->rt_rq[cpu] = rt_rq;
143 tg->rt_se[cpu] = rt_se;
149 rt_se->rt_rq = &rq->rt;
151 rt_se->rt_rq = parent->my_q;
154 rt_se->parent = parent;
164 tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
167 tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
175 rt_rq = kzalloc_node(sizeof(struct rt_rq),
186 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
187 init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
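/*
 * With RT_GROUP_SCHED each task group owns one rt_rq and one
 * sched_rt_entity per CPU; init_tg_rt_entry() links the entity into its
 * parent's queue (or into rq->rt at the top level) and the fresh rt_rq
 * starts out with the group's rt_runtime budget.
 */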
200 #define rt_entity_is_task(rt_se) (1)
207 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
230 static inline int rt_overloaded(struct rq *rq)
235 static inline void rt_set_overload(struct rq *rq)
240 cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
252 static inline void rt_clear_overload(struct rq *rq)
259 cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
262 static void update_rt_migration(struct rt_rq *rt_rq)
264 if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
265 if (!rt_rq->overloaded) {
266 rt_set_overload(rq_of_rt_rq(rt_rq));
267 rt_rq->overloaded = 1;
269 } else if (rt_rq->overloaded) {
270 rt_clear_overload(rq_of_rt_rq(rt_rq));
271 rt_rq->overloaded = 0;
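/*
 * update_rt_migration(): an rq is flagged RT-overloaded (bit set in
 * rd->rto_mask) only while it has more than one runnable RT task and at
 * least one of them may run elsewhere -- otherwise there is nothing
 * worth pulling from it.
 */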
275 static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
282 p = rt_task_of(rt_se);
283 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
285 rt_rq->rt_nr_total++;
287 rt_rq->rt_nr_migratory++;
289 update_rt_migration(rt_rq);
292 static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
299 p = rt_task_of(rt_se);
300 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
302 rt_rq->rt_nr_total--;
304 rt_rq->rt_nr_migratory--;
306 update_rt_migration(rt_rq);
309 static inline int has_pushable_tasks(struct rq *rq)
311 return !plist_head_empty(&rq->rt.pushable_tasks);
314 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
316 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
317 plist_node_init(&p->pushable_tasks, p->prio);
318 plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
321 if (p->prio < rq->rt.highest_prio.next)
322 rq->rt.highest_prio.next = p->prio;
325 static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
327 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
330 if (has_pushable_tasks(rq)) {
333 rq->rt.highest_prio.next = p->prio;
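/*
 * pushable_tasks is kept sorted by priority: enqueue_pushable_task()
 * deletes and re-adds the node so a task whose priority changed is
 * re-sorted, and rt.highest_prio.next caches the best priority on the
 * list so remote CPUs can judge whether a pull is worthwhile before
 * taking any locks.
 */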
340 static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
344 static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
349 void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
354 void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
362 return !list_empty(&rt_se->run_list);
365 #ifdef CONFIG_RT_GROUP_SCHED
367 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
375 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
377 return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
382 static inline struct task_group *next_task_group(struct task_group *tg)
385 tg = list_entry_rcu(tg->list.next,
387 } while (&tg->list != &task_groups && task_group_is_autogroup(tg));
389 if (&tg->list == &task_groups)
395 #define for_each_rt_rq(rt_rq, iter, rq) \
396 for (iter = container_of(&task_groups, typeof(*iter), list); \
397 (iter = next_task_group(iter)) && \
398 (rt_rq = iter->rt_rq[cpu_of(rq)]);)
400 static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
402 list_add_rcu(&rt_rq->leaf_rt_rq_list,
403 &rq_of_rt_rq(rt_rq)->leaf_rt_rq_list);
406 static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq)
408 list_del_rcu(&rt_rq->leaf_rt_rq_list);
411 #define for_each_leaf_rt_rq(rt_rq, rq) \
412 list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)
414 #define for_each_sched_rt_entity(rt_se) \
415 for (; rt_se; rt_se = rt_se->parent)
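/*
 * Under RT_GROUP_SCHED the scheduling entities form a hierarchy:
 * for_each_sched_rt_entity() walks from a task's entity up through its
 * group entities to the root, while for_each_rt_rq() visits this CPU's
 * rt_rq of every non-autogroup task group.
 */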
425 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
430 int cpu = cpu_of(rq_of_rt_rq(rt_rq));
432 rt_se = rt_rq->tg->rt_se[cpu];
435 if (rt_se && !on_rt_rq(rt_se))
436 enqueue_rt_entity(rt_se, false);
437 if (rt_rq->highest_prio.curr < curr->prio)
442 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
445 int cpu = cpu_of(rq_of_rt_rq(rt_rq));
447 rt_se = rt_rq->tg->rt_se[cpu];
449 if (rt_se && on_rt_rq(rt_se))
450 dequeue_rt_entity(rt_se);
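/*
 * sched_rt_rq_enqueue()/sched_rt_rq_dequeue() (un)throttle a group rt_rq
 * by adding or removing its per-CPU entity; the enqueue side also
 * rescheds the current task when the group now holds a higher-priority
 * task than the one running.
 */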
453 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
460 struct rt_rq *rt_rq = group_rt_rq(rt_se);
464 return !!rt_rq->rt_nr_boosted;
466 p = rt_task_of(rt_se);
471 static inline const struct cpumask *sched_rt_period_mask(void)
476 static inline const struct cpumask *sched_rt_period_mask(void)
483 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
488 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
490 return &rt_rq->tg->rt_bandwidth;
495 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
500 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
507 #define for_each_rt_rq(rt_rq, iter, rq) \
508 for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
510 static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
514 static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq)
518 #define for_each_leaf_rt_rq(rt_rq, rq) \
519 for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
521 #define for_each_sched_rt_entity(rt_se) \
522 for (; rt_se; rt_se = NULL)
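/*
 * Without RT_GROUP_SCHED the same iterators degenerate to the single
 * rq->rt run queue and an entity has no parent, so these loops execute
 * exactly once.
 */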
529 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
535 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
539 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
544 static inline const struct cpumask *sched_rt_period_mask(void)
550 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
555 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
566 static int do_balance_runtime(struct rt_rq *rt_rq)
573 weight = cpumask_weight(rd->span);
576 rt_period = ktime_to_ns(rt_b->rt_period);
578 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
599 diff = div_u64((u64)diff, weight);
621 static void __disable_runtime(struct rq *rq)
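/*
 * do_balance_runtime() lets a starved rt_rq borrow unused rt_runtime
 * from the other CPUs of its root domain: each lender gives up at most
 * an equal share of its spare budget (diff / weight) and the borrower is
 * never topped up beyond rt_period.
 */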
623 struct root_domain *rd = rq->rd;
658 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
700 static void disable_runtime(struct rq *rq)
705 __disable_runtime(rq);
709 static void __enable_runtime(struct rq *rq)
733 static void enable_runtime(struct rq *rq)
738 __enable_runtime(rq);
744 int cpu = (int)(long)hcpu;
749 disable_runtime(cpu_rq(cpu));
756 enable_runtime(cpu_rq(cpu));
764 static int balance_runtime(struct rt_rq *rt_rq)
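/*
 * The CPU-hotplug notifier hooks in here: while a CPU goes down,
 * disable_runtime() settles whatever runtime its rt_rqs lent to or
 * borrowed from their peers and lifts throttling on them; once the CPU
 * is back online, enable_runtime() restores the per-rq defaults.
 */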
773 more = do_balance_runtime(rt_rq);
780 static inline int balance_runtime(struct rt_rq *rt_rq)
786 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
788 int i, idle = 1, throttled = 0;
791 span = sched_rt_period_mask();
792 #ifdef CONFIG_RT_GROUP_SCHED
807 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
808 struct rq *rq = rq_of_rt_rq(rt_rq);
816 balance_runtime(rt_rq);
835 if (!rt_rq_throttled(rt_rq))
842 sched_rt_rq_enqueue(rt_rq);
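/*
 * do_sched_rt_period_timer() runs once per rt_period for every CPU in
 * sched_rt_period_mask(): it pays down rt_time by (up to) one rt_runtime
 * per elapsed period, borrowing extra runtime first if the rt_rq is
 * throttled, and once the debt is low enough it clears rt_throttled and
 * re-enqueues the rt_rq so its tasks can run again.
 */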
854 #ifdef CONFIG_RT_GROUP_SCHED
855 struct rt_rq *rt_rq = group_rt_rq(rt_se);
858 return rt_rq->highest_prio.curr;
861 return rt_task_of(rt_se)->prio;
864 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
866 u64 runtime = sched_rt_runtime(rt_rq);
869 return rt_rq_throttled(rt_rq);
871 if (runtime >= sched_rt_period(rt_rq))
874 balance_runtime(rt_rq);
875 runtime = sched_rt_runtime(rt_rq);
879 if (rt_rq->rt_time > runtime) {
887 static bool once = false;
893 printk_sched("sched: RT throttling activated\n");
904 if (rt_rq_throttled(rt_rq)) {
905 sched_rt_rq_dequeue(rt_rq);
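/*
 * sched_rt_runtime_exceeded(): once accumulated rt_time exceeds the
 * (possibly borrowed) runtime, the rt_rq is marked throttled and
 * dequeued so SCHED_OTHER tasks are not starved; the printk above fires
 * only the first time this happens.  The budget is sched_rt_runtime_us
 * out of sched_rt_period_us (950000/1000000 by default, i.e. 95%), and
 * writing -1 to sched_rt_runtime_us disables throttling altogether.
 */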
917 static void update_curr_rt(struct rq *rq)
921 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
932 max(curr->se.statistics.exec_max, delta_exec));
934 curr->se.sum_exec_runtime += delta_exec;
935 account_group_exec_runtime(curr, delta_exec);
938 cpuacct_charge(curr, delta_exec);
940 sched_rt_avg_update(rq, delta_exec);
942 if (!rt_bandwidth_enabled())
946 rt_rq = rt_rq_of_se(rt_se);
951 if (sched_rt_runtime_exceeded(rt_rq))
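/*
 * update_curr_rt() charges the time the current RT task just ran to its
 * scheduling statistics and then adds it to rt_time of every rt_rq on
 * the task's entity hierarchy; if that pushes an rt_rq over its budget
 * the task is rescheduled (the throttled rt_rq has already been dequeued
 * by sched_rt_runtime_exceeded()).
 */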
958 #if defined CONFIG_SMP
961 inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
963 struct rq *rq = rq_of_rt_rq(rt_rq);
965 if (rq->online && prio < prev_prio)
970 dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
972 struct rq *rq = rq_of_rt_rq(rt_rq);
974 if (rq->online && rt_rq->highest_prio.curr != prev_prio)
975 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
981 void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
983 void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
987 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
989 inc_rt_prio(struct rt_rq *rt_rq, int prio)
991 int prev_prio = rt_rq->highest_prio.curr;
993 if (prio < prev_prio)
994 rt_rq->highest_prio.curr = prio;
996 inc_rt_prio_smp(rt_rq, prio, prev_prio);
1000 dec_rt_prio(struct rt_rq *rt_rq, int prio)
1002 int prev_prio = rt_rq->highest_prio.curr;
1012 if (prio == prev_prio) {
1015 rt_rq->highest_prio.curr =
1016 sched_find_first_bit(array->bitmap);
1022 dec_rt_prio_smp(rt_rq, prio, prev_prio);
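/*
 * inc_rt_prio()/dec_rt_prio() keep rt_rq->highest_prio.curr consistent
 * with the priority bitmap, and on SMP the *_smp helpers publish the new
 * value to cpupri so find_lowest_rq() on other CPUs sees an up-to-date
 * picture.
 */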
1027 static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
1028 static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
1032 #ifdef CONFIG_RT_GROUP_SCHED
1037 if (rt_se_boosted(rt_se))
1038 rt_rq->rt_nr_boosted++;
1041 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
1047 if (rt_se_boosted(rt_se))
1048 rt_rq->rt_nr_boosted--;
1062 void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
1069 int prio = rt_se_prio(rt_se);
1074 inc_rt_prio(rt_rq, prio);
1075 inc_rt_migration(rt_se, rt_rq);
1076 inc_rt_group(rt_se, rt_rq);
1082 WARN_ON(!rt_prio(rt_se_prio(rt_se)));
1086 dec_rt_prio(rt_rq, rt_se_prio(rt_se));
1087 dec_rt_migration(rt_se, rt_rq);
1088 dec_rt_group(rt_se, rt_rq);
1093 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1095 struct rt_rq *group_rq = group_rt_rq(rt_se);
1104 if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
1108 list_add_leaf_rt_rq(rt_rq);
1114 __set_bit(rt_se_prio(rt_se), array->bitmap);
1116 inc_rt_tasks(rt_se, rt_rq);
1121 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1125 if (list_empty(array->queue + rt_se_prio(rt_se)))
1128 dec_rt_tasks(rt_se, rt_rq);
1130 list_del_leaf_rt_rq(rt_rq);
1146 for (rt_se = back; rt_se; rt_se = rt_se->back) {
1147 if (on_rt_rq(rt_se))
1148 __dequeue_rt_entity(rt_se);
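/*
 * dequeue_rt_stack(): a loop not shown here first records the path from
 * the task's entity up to the root via ->back; the loop above then
 * dequeues from the top of the hierarchy downwards, so entities are
 * never left queued under a parent that is being moved.
 */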
1152 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
1154 dequeue_rt_stack(rt_se);
1156 __enqueue_rt_entity(rt_se, head);
1161 dequeue_rt_stack(rt_se);
1164 struct rt_rq *rt_rq = group_rt_rq(rt_se);
1167 __enqueue_rt_entity(rt_se, false);
1185 enqueue_pushable_task(rq, p);
1190 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1195 dequeue_rt_entity(rt_se);
1197 dequeue_pushable_task(rq, p);
1207 requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
1209 if (on_rt_rq(rt_se)) {
1214 list_move(&rt_se->run_list, queue);
1216 list_move_tail(&rt_se->run_list, queue);
1220 static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
1223 struct rt_rq *rt_rq;
1226 rt_rq = rt_rq_of_se(rt_se);
1227 requeue_rt_entity(rt_rq, rt_se, head);
1231 static void yield_task_rt(struct rq *rq)
1233 requeue_task_rt(rq, rq->curr, 0);
1240 select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
1252 if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
1282 if (curr && unlikely(rt_task(curr)) &&
1286 int target = find_lowest_rq(p);
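/*
 * select_task_rq_rt(): on wakeup or fork, if the CPU the task would land
 * on is already running an RT task that is pinned or of equal/higher
 * priority, and the waking task itself may migrate, it is redirected to
 * the lowest-priority CPU reported by find_lowest_rq().
 */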
1297 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1299 if (rq->curr->nr_cpus_allowed == 1)
1314 requeue_task_rt(rq, p, 1);
1323 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
1343 if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
1344 check_preempt_equal_prio(rq, p);
1349 struct rt_rq *rt_rq)
1356 idx = sched_find_first_bit(array->bitmap);
1365 static struct task_struct *_pick_next_task_rt(struct rq *rq)
1369 struct rt_rq *rt_rq;
1376 if (rt_rq_throttled(rt_rq))
1380 rt_se = pick_next_rt_entity(rq, rt_rq);
1382 rt_rq = group_rt_rq(rt_se);
1385 p = rt_task_of(rt_se);
1391 static struct task_struct *pick_next_task_rt(struct rq *rq)
1397 dequeue_pushable_task(rq, p);
1404 rq->post_schedule = has_pushable_tasks(rq);
1410 static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1419 enqueue_pushable_task(rq, p);
1425 #define RT_MAX_TRIES 3
1427 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1429 if (!task_running(rq, p) &&
1437 static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
1442 struct rt_rq *rt_rq;
1447 idx = sched_find_first_bit(array->bitmap);
1451 if (next && next->prio <= idx)
1459 p = rt_task_of(rt_se);
1460 if (pick_rt_task(rq, p, cpu)) {
1478 struct sched_domain *sd;
1481 int cpu = task_cpu(task);
1512 for_each_domain(cpu, sd) {
1513 if (sd->flags & SD_WAKE_AFFINE) {
1520 if (this_cpu != -1 &&
1527 sched_domain_span(sd));
1528 if (best_cpu < nr_cpu_ids) {
1545 if (cpu < nr_cpu_ids)
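/*
 * find_lowest_rq() asks cpupri for the set of CPUs currently running
 * lower-priority work and then prefers, in order: the task's own CPU, a
 * cache-affine CPU within an SD_WAKE_AFFINE domain, and finally any CPU
 * remaining in the mask.
 */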
1551 static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1553 struct rq *lowest_rq = NULL;
1557 for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1558 cpu = find_lowest_rq(task);
1560 if ((cpu == -1) || (cpu == rq->cpu))
1566 if (double_lock_balance(rq, lowest_rq)) {
1576 task_running(rq, task) ||
1579 double_unlock_balance(rq, lowest_rq);
1586 if (lowest_rq->rt.highest_prio.curr > task->prio)
1590 double_unlock_balance(rq, lowest_rq);
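/*
 * find_lock_lowest_rq(): double_lock_balance() may drop rq->lock to take
 * both locks in order, so the task is re-validated afterwards (still on
 * this rq, not running, still allowed on the target CPU); the search is
 * retried up to RT_MAX_TRIES times before giving up.
 */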
1597 static struct task_struct *pick_next_pushable_task(struct rq *rq)
1601 if (!has_pushable_tasks(rq))
1607 BUG_ON(rq->cpu != task_cpu(p));
1608 BUG_ON(task_current(rq, p));
1622 static int push_rt_task(struct rq *rq)
1625 struct rq *lowest_rq;
1628 if (!rq->rt.overloaded)
1631 next_task = pick_next_pushable_task(rq);
1655 lowest_rq = find_lock_lowest_rq(next_task, rq);
1666 task = pick_next_pushable_task(rq);
1667 if (task_cpu(next_task) == rq->cpu && task == next_task) {
1684 put_task_struct(next_task);
1690 set_task_cpu(next_task, lowest_rq->cpu);
1696 double_unlock_balance(rq, lowest_rq);
1699 put_task_struct(next_task);
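/*
 * push_rt_task() moves the highest-priority pushable task of an
 * overloaded rq to a CPU whose current priority is lower: if the locks
 * had to be dropped and the pushable list changed underneath us it
 * retries with the new head, otherwise the task is migrated with
 * set_task_cpu() and the target CPU is asked to reschedule.
 */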
1704 static void push_rt_tasks(struct rq *rq)
1707 while (push_rt_task(rq))
1711 static int pull_rt_task(struct rq *this_rq)
1713 int this_cpu = this_rq->cpu, ret = 0, cpu;
1717 if (likely(!rt_overloaded(this_rq)))
1721 if (this_cpu == cpu)
1733 if (src_rq->rt.highest_prio.next >=
1734 this_rq->rt.highest_prio.curr)
1742 double_lock_balance(this_rq, src_rq);
1747 if (src_rq->rt.rt_nr_running <= 1)
1750 p = pick_next_highest_task_rt(src_rq, this_cpu);
1756 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
1774 set_task_cpu(p, this_cpu);
1784 double_unlock_balance(this_rq, src_rq);
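/*
 * pull_rt_task() is the mirror image: when this rq is about to run
 * something of lower priority it scans the CPUs set in rd->rto_mask and
 * pulls over the highest-priority pushable task that beats what we would
 * run next.  pre_schedule_rt() below triggers the pull whenever the RT
 * task being switched out had a higher priority than anything left on
 * rq->rt.
 */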
1793 if (rq->rt.highest_prio.curr > prev->prio)
1797 static void post_schedule_rt(struct rq *rq)
1806 static void task_woken_rt(struct rq *rq, struct task_struct *p)
1808 if (!task_running(rq, p) &&
1809 !test_tsk_need_resched(rq->curr) &&
1810 has_pushable_tasks(rq) &&
1812 rt_task(rq->curr) &&
1813 (rq->curr->nr_cpus_allowed < 2 ||
1818 static void set_cpus_allowed_rt(struct task_struct *p,
1819 const struct cpumask *new_mask)
1829 weight = cpumask_weight(new_mask);
1844 if (!task_current(rq, p))
1845 dequeue_pushable_task(rq, p);
1847 rq->rt.rt_nr_migratory--;
1849 if (!task_current(rq, p))
1850 enqueue_pushable_task(rq, p);
1851 rq->rt.rt_nr_migratory++;
1854 update_rt_migration(&rq->rt);
1858 static void rq_online_rt(struct rq *rq)
1860 if (rq->rt.overloaded)
1861 rt_set_overload(rq);
1863 __enable_runtime(rq);
1865 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
1869 static void rq_offline_rt(struct rq *rq)
1871 if (rq->rt.overloaded)
1872 rt_clear_overload(rq);
1874 __disable_runtime(rq);
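/*
 * rq_online_rt()/rq_offline_rt() run when a CPU joins or leaves a root
 * domain: they publish or retract the rq's overload state and its
 * highest RT priority in cpupri, and enable or disable the runtime
 * sharing machinery for that CPU.
 */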
1883 static void switched_from_rt(struct rq *rq, struct task_struct *p)
1892 if (p->on_rq && !rq->rt.rt_nr_running)
1901 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
1912 static void switched_to_rt(struct rq *rq, struct task_struct *p)
1914 int check_resched = 1;
1925 if (rq->rt.overloaded && push_rt_task(rq) &&
1930 if (check_resched && p->prio < rq->curr->prio)
1940 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
1945 if (rq->curr == p) {
1951 if (oldprio < p->prio)
1959 if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
1963 if (oldprio < p->prio)
1990 if (p->rt.timeout > next)
1995 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
2010 if (--p->rt.time_slice)
2021 requeue_task_rt(rq, p, 0);
2022 set_tsk_need_resched(p);
2028 static void set_curr_task_rt(struct rq *rq)
2035 dequeue_pushable_task(rq, p);
2038 static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
2051 .enqueue_task = enqueue_task_rt,
2052 .dequeue_task = dequeue_task_rt,
2053 .yield_task = yield_task_rt,
2055 .check_preempt_curr = check_preempt_curr_rt,
2057 .pick_next_task = pick_next_task_rt,
2058 .put_prev_task = put_prev_task_rt,
2061 .select_task_rq = select_task_rq_rt,
2063 .set_cpus_allowed = set_cpus_allowed_rt,
2064 .rq_online = rq_online_rt,
2065 .rq_offline = rq_offline_rt,
2066 .pre_schedule = pre_schedule_rt,
2067 .post_schedule = post_schedule_rt,
2068 .task_woken = task_woken_rt,
2069 .switched_from = switched_from_rt,
2072 .set_curr_task = set_curr_task_rt,
2073 .task_tick = task_tick_rt,
2075 .get_rr_interval = get_rr_interval_rt,
2077 .prio_changed = prio_changed_rt,
2078 .switched_to = switched_to_rt,
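/*
 * rt_sched_class collects the hooks above into the scheduling-class
 * vtable the core scheduler dispatches through; its .next pointer (not
 * shown in this excerpt) chains it ahead of the fair class, which is why
 * runnable RT tasks are picked before SCHED_OTHER work.
 *
 * For context (not part of rt.c): a task enters this class when it is
 * given one of the RT policies, e.g. from userspace:
 *
 *	struct sched_param sp = { .sched_priority = 50 };
 *
 *	if (sched_setscheduler(0, SCHED_FIFO, &sp))	(or SCHED_RR)
 *		perror("sched_setscheduler");
 *
 * after which enqueue_task_rt()/pick_next_task_rt() above take over.
 */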
2081 #ifdef CONFIG_SCHED_DEBUG
2087 struct rt_rq *rt_rq;