#include <linux/oom.h>

#define RCU_KTHREAD_PRIO 1

#ifdef CONFIG_RCU_BOOST
#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
#else
#define RCU_BOOST_PRIO RCU_KTHREAD_PRIO
#endif
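/*
 * Announce at boot time any RCU-related Kconfig settings that differ
 * from the defaults.
 */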
static void __init rcu_bootup_announce_oddness(void)
#ifdef CONFIG_RCU_TRACE
#if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32)
	printk(KERN_INFO "\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
#ifdef CONFIG_RCU_FANOUT_EXACT
#ifdef CONFIG_RCU_FAST_NO_HZ
	       "\tRCU dyntick-idle grace-period acceleration is enabled.\n");
#ifdef CONFIG_PROVE_RCU
#ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
#if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE)
#if defined(CONFIG_RCU_CPU_STALL_INFO)
#if NUM_RCU_LVL_4 != 0
	if (rcu_fanout_leaf != CONFIG_RCU_FANOUT_LEAF)
		printk(KERN_INFO "\tExperimental boot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
#ifdef CONFIG_TREE_PREEMPT_RCU

static int rcu_preempted_readers_exp(struct rcu_node *rnp);

static void __init rcu_bootup_announce(void)
	rcu_bootup_announce_oddness();

long rcu_batches_completed_preempt(void)

	return rcu_batches_completed_preempt();

	force_quiescent_state(&rcu_preempt_state);
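/*
 * Record a preemptible-RCU quiescent state for the specified CPU and
 * clear the corresponding need-quiescent-state flag on the current task.
 */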
static void rcu_preempt_qs(int cpu)
	current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;

	if (t->rcu_read_lock_nesting > 0 &&
	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
		t->rcu_blocked_node = rnp;

			list_add(&t->rcu_node_entry, rnp->gp_tasks->prev);
#ifdef CONFIG_RCU_BOOST
			if (rnp->boost_tasks != NULL)

			list_add(&t->rcu_node_entry, &rnp->blkd_tasks);

	} else if (t->rcu_read_lock_nesting < 0 &&
		   t->rcu_read_unlock_special) {
		rcu_read_unlock_special(t);

static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)

static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
				       unsigned long flags)
	if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {

		rcu_report_qs_rsp(&rcu_preempt_state, flags);

	rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);

	np = t->rcu_node_entry.next;
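/*
 * Handle the special cases that rcu_read_unlock() must deal with, such as
 * reporting a pending quiescent state or dequeueing a task that blocked
 * (and was possibly priority-boosted) during the critical section.
 */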
void rcu_read_unlock_special(struct task_struct *t)
#ifdef CONFIG_RCU_BOOST

	special = t->rcu_read_unlock_special;
	if (special & RCU_READ_UNLOCK_NEED_QS) {

	if (special & RCU_READ_UNLOCK_BLOCKED) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

		rnp = t->rcu_blocked_node;
		if (rnp == t->rcu_blocked_node)

		empty = !rcu_preempt_blocked_readers_cgp(rnp);
		empty_exp = !rcu_preempted_readers_exp(rnp);

		np = rcu_next_node_entry(t, rnp);
		list_del_init(&t->rcu_node_entry);
		t->rcu_blocked_node = NULL;

		if (&t->rcu_node_entry == rnp->gp_tasks)
		if (&t->rcu_node_entry == rnp->exp_tasks)
#ifdef CONFIG_RCU_BOOST
		if (&t->rcu_node_entry == rnp->boost_tasks)
			rnp->boost_tasks = np;

		if (t->rcu_boost_mutex) {
			rbmp = t->rcu_boost_mutex;
			t->rcu_boost_mutex = NULL;

		empty_exp_now = !rcu_preempted_readers_exp(rnp);
		if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) {

			rcu_report_unblock_qs_rnp(rnp, flags);
#ifdef CONFIG_RCU_BOOST

		if (!empty_exp && empty_exp_now)
			rcu_report_exp_rnp(&rcu_preempt_state, rnp, true);
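/*
 * Dump detailed information about tasks blocking the current grace period
 * when RCU CPU stall warnings fire (CONFIG_RCU_CPU_STALL_VERBOSE).
 */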
#ifdef CONFIG_RCU_CPU_STALL_VERBOSE

static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)

	if (!rcu_preempt_blocked_readers_cgp(rnp)) {

static void rcu_print_detail_task_stall(struct rcu_state *rsp)
	struct rcu_node *rnp = rcu_get_root(rsp);

	rcu_print_detail_task_stall_rnp(rnp);
		rcu_print_detail_task_stall_rnp(rnp);

static void rcu_print_detail_task_stall(struct rcu_state *rsp)

#ifdef CONFIG_RCU_CPU_STALL_INFO

static void rcu_print_task_stall_begin(struct rcu_node *rnp)
	printk(KERN_ERR "\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",

static void rcu_print_task_stall_end(void)

static void rcu_print_task_stall_begin(struct rcu_node *rnp)

static void rcu_print_task_stall_end(void)

static int rcu_print_task_stall(struct rcu_node *rnp)

	if (!rcu_preempt_blocked_readers_cgp(rnp))
	rcu_print_task_stall_begin(rnp);
	rcu_print_task_stall_end();

static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)

#ifdef CONFIG_HOTPLUG_CPU
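/*
 * CPU-hotplug path: move tasks queued on an rcu_node whose last CPU is
 * going offline onto the root rcu_node, so that they continue to block
 * the current normal and/or expedited grace periods as needed.
 */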
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,

	struct rcu_node *rnp_root = rcu_get_root(rsp);

	if (rnp == rnp_root) {
		WARN_ONCE(1, "Last CPU thought to be offlined?");

	if (rcu_preempt_blocked_readers_cgp(rnp) && rnp->qsmask == 0)
	if (rcu_preempted_readers_exp(rnp))

	while (!list_empty(lp)) {
		t->rcu_blocked_node = rnp_root;
		list_add(&t->rcu_node_entry, lp_root);
		if (&t->rcu_node_entry == rnp->gp_tasks)
		if (&t->rcu_node_entry == rnp->exp_tasks)
#ifdef CONFIG_RCU_BOOST
		if (&t->rcu_node_entry == rnp->boost_tasks)
			rnp_root->boost_tasks = rnp->boost_tasks;

#ifdef CONFIG_RCU_BOOST
	rnp->boost_tasks = NULL;

	if (rnp_root->boost_tasks != NULL &&
	    rnp_root->boost_tasks != rnp_root->gp_tasks &&
	    rnp_root->boost_tasks != rnp_root->exp_tasks)
		rnp_root->boost_tasks = rnp_root->gp_tasks;
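/*
 * Scheduling-clock-tick check for the preemptible flavor: record a
 * quiescent state if the current task is outside any read-side critical
 * section, otherwise ask rcu_read_unlock() to report one later.
 */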
static void rcu_preempt_check_callbacks(int cpu)

	if (t->rcu_read_lock_nesting == 0) {

	if (t->rcu_read_lock_nesting > 0 &&
	    per_cpu(rcu_preempt_data, cpu).qs_pending)
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;

#ifdef CONFIG_RCU_BOOST

static void rcu_preempt_do_callbacks(void)
	rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data));

	__call_rcu(head, func, &rcu_preempt_state, 0);

	__call_rcu(head, func, &rcu_preempt_state, 1);

void synchronize_rcu(void)

			   "Illegal synchronize_rcu() in RCU read-side critical section");
	if (!rcu_scheduler_active)
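/*
 * Machinery for synchronize_rcu_expedited(): push all CPUs and any
 * preempted readers through a grace period as quickly as possible.
 */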
static unsigned long sync_rcu_preempt_exp_count;

static int rcu_preempted_readers_exp(struct rcu_node *rnp)

static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
	return !rcu_preempted_readers_exp(rnp) &&

static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,

	if (!sync_rcu_preempt_exp_done(rnp)) {

			wake_up(&sync_rcu_preempt_exp_wq);

static void
sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)

		rcu_initiate_boost(rnp, flags);

		rcu_report_exp_rnp(rsp, rnp, false);

	struct rcu_state *rsp = &rcu_preempt_state;

	snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1;

		if (trycount++ < 10) {

		sync_rcu_preempt_exp_init(rsp, rnp);
	sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));

	rnp = rcu_get_root(rsp);
		   sync_rcu_preempt_exp_done(rnp));
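/*
 * rcu_barrier() for the preemptible flavor: wait until all in-flight RCU
 * callbacks have been invoked.
 */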
void rcu_barrier(void)
	_rcu_barrier(&rcu_preempt_state);

static void __init __rcu_init_preempt(void)
	rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
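/*
 * Stub versions of the functions above, used when CONFIG_TREE_PREEMPT_RCU
 * is not set (classic, non-preemptible Tree RCU).
 */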
static void __init rcu_bootup_announce(void)
	rcu_bootup_announce_oddness();

static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)

#ifdef CONFIG_HOTPLUG_CPU

static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
				       unsigned long flags)

static void rcu_print_detail_task_stall(struct rcu_state *rsp)

static int rcu_print_task_stall(struct rcu_node *rnp)

static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)

#ifdef CONFIG_HOTPLUG_CPU

static int rcu_preempt_offline_tasks(struct rcu_state *rsp,

static void rcu_preempt_check_callbacks(int cpu)

#ifdef CONFIG_HOTPLUG_CPU

static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,

static void __init __rcu_init_preempt(void)

#ifdef CONFIG_RCU_BOOST
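/*
 * RCU priority boosting (CONFIG_RCU_BOOST): boost the priority of readers
 * that are blocking an overdue grace period so that they run and complete
 * their read-side critical sections.
 */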
#ifdef CONFIG_RCU_TRACE

static void rcu_initiate_boost_trace(struct rcu_node *rnp)

		rnp->n_balk_blkd_tasks++;
		rnp->n_balk_exp_gp_tasks++;
		rnp->n_balk_boost_tasks++;
		rnp->n_balk_notblocked++;
		rnp->n_balk_notyet++;

static void rcu_initiate_boost_trace(struct rcu_node *rnp)
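/*
 * Boost the priority of the first task queued on the specified rcu_node
 * that is blocking the current (normal or expedited) grace period, using
 * an rt_mutex to carry out the priority inheritance.
 */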
static int rcu_boost(struct rcu_node *rnp)
	unsigned long flags;

		rnp->n_exp_boosts++;

		tb = rnp->boost_tasks;
		rnp->n_normal_boosts++;
	rnp->n_tasks_boosted++;

	t->rcu_boost_mutex = &mtx;
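/*
 * Per-rcu_node kthread that repeatedly invokes rcu_boost() as long as
 * there are readers worth boosting, yielding the CPU between passes.
 */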
static int rcu_boost_kthread(void *arg)

	trace_rcu_utilization("Start boost kthread@init");
		trace_rcu_utilization("End boost kthread@rcu_wait");
		trace_rcu_utilization("Start boost kthread@rcu_wait");
		more2boost = rcu_boost(rnp);
			trace_rcu_utilization("End boost kthread@rcu_yield");
			trace_rcu_utilization("Start boost kthread@rcu_yield");
	trace_rcu_utilization("End boost kthread@notreached");

static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)

	if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
		rnp->n_balk_exp_gp_tasks++;

	     rnp->boost_tasks == NULL &&

		t = rnp->boost_kthread_task;
			rcu_wake_cond(t, rnp->boost_kthread_status);
		rcu_initiate_boost_trace(rnp);

static void invoke_rcu_callbacks_kthread(void)
	unsigned long flags;

static bool rcu_is_callbacks_kthread(void)

#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)

static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
	rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
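/*
 * Create the per-rcu_node boost kthread ("rcub/N") for the specified
 * rcu_node, unless it already exists or boosting does not apply.
 */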
static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,

	int rnp_index = rnp - &rsp->node[0];
	unsigned long flags;

	if (&rcu_preempt_state != rsp)

	if (!rcu_scheduler_fully_active || rnp->qsmaskinit == 0)

	if (rnp->boost_kthread_task != NULL)

			   "rcub/%d", rnp_index);

	rnp->boost_kthread_task = t;

static void rcu_kthread_do_work(void)
	rcu_preempt_do_callbacks();

static void rcu_cpu_kthread_setup(unsigned int cpu)

static void rcu_cpu_kthread_park(unsigned int cpu)

static int rcu_cpu_kthread_should_run(unsigned int cpu)

static void rcu_cpu_kthread(unsigned int cpu)
	unsigned int *statusp = &__get_cpu_var(rcu_cpu_kthread_status);

	for (spincnt = 0; spincnt < 10; spincnt++) {
		trace_rcu_utilization("Start CPU kthread@rcu_wait");
		rcu_kthread_do_work();
	trace_rcu_utilization("End CPU kthread@rcu_wait");
	trace_rcu_utilization("Start CPU kthread@rcu_yield");
	trace_rcu_utilization("End CPU kthread@rcu_yield");

static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)

		if ((mask & 0x1) && cpu != outgoingcpu)
			cpumask_set_cpu(cpu, cm);
	if (cpumask_weight(cm) == 0) {
			cpumask_clear_cpu(cpu, cm);
	set_cpus_allowed_ptr(t, cm);
	free_cpumask_var(cm);
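/*
 * smpboot descriptor for the per-CPU "rcuc" kthreads; the smpboot
 * infrastructure creates, parks, and runs them through these callbacks.
 */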
static struct smp_hotplug_thread rcu_cpu_thread_spec = {
	.store			= &rcu_cpu_kthread_task,
	.thread_should_run	= rcu_cpu_kthread_should_run,
	.thread_fn		= rcu_cpu_kthread,
	.thread_comm		= "rcuc/%u",
	.setup			= rcu_cpu_kthread_setup,
	.park			= rcu_cpu_kthread_park,
};
static int __init rcu_spawn_kthreads(void)

	rcu_scheduler_fully_active = 1;
		per_cpu(rcu_cpu_has_work, cpu) = 0;
	rnp = rcu_get_root(rcu_state);
	(void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
			(void)rcu_spawn_one_boost_kthread(rcu_state, rnp);

static void __cpuinit rcu_prepare_kthreads(int cpu)

	if (rcu_scheduler_fully_active)
		(void)rcu_spawn_one_boost_kthread(rcu_state, rnp);

static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)

static void invoke_rcu_callbacks_kthread(void)

static bool rcu_is_callbacks_kthread(void)

static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)

static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)

static int __init rcu_scheduler_really_started(void)
	rcu_scheduler_fully_active = 1;

static void __cpuinit rcu_prepare_kthreads(int cpu)

#if !defined(CONFIG_RCU_FAST_NO_HZ)

	return rcu_cpu_has_callbacks(cpu);

static void rcu_prepare_for_idle_init(int cpu)

static void rcu_cleanup_after_idle(int cpu)

static void rcu_prepare_for_idle(int cpu)

static void rcu_idle_count_callbacks_posted(void)
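/*
 * CONFIG_RCU_FAST_NO_HZ: try to get callbacks invoked (or a grace period
 * started) before the CPU enters dyntick-idle, governed by the flush
 * counts and timer delays defined below.
 */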
#define RCU_IDLE_FLUSHES 5
#define RCU_IDLE_OPT_FLUSHES 3
#define RCU_IDLE_GP_DELAY 4
#define RCU_IDLE_LAZY_GP_DELAY (6 * HZ)

extern int tick_nohz_enabled;

static bool __rcu_cpu_has_nonlazy_callbacks(struct rcu_data *rdp)

#ifdef CONFIG_TREE_PREEMPT_RCU

static bool rcu_preempt_cpu_has_nonlazy_callbacks(int cpu)
	return __rcu_cpu_has_nonlazy_callbacks(rdp);

static bool rcu_preempt_cpu_has_nonlazy_callbacks(int cpu)

static bool rcu_cpu_has_nonlazy_callbacks(int cpu)
	return __rcu_cpu_has_nonlazy_callbacks(&per_cpu(rcu_sched_data, cpu)) ||
	       __rcu_cpu_has_nonlazy_callbacks(&per_cpu(rcu_bh_data, cpu)) ||
	       rcu_preempt_cpu_has_nonlazy_callbacks(cpu);

	rdtp->idle_first_pass = 1;
	if (!rcu_cpu_has_callbacks(cpu)) {
	if (rdtp->dyntick_holdoff == jiffies) {
	if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
		*delta_jiffies = round_up(RCU_IDLE_GP_DELAY + jiffies,
		*delta_jiffies = jiffies + RCU_IDLE_LAZY_GP_DELAY;

void rcu_idle_demigrate(void *unused)

static void rcu_idle_gp_timer_func(unsigned long cpu_in)
	int cpu = (int)cpu_in;

static void rcu_prepare_for_idle_init(int cpu)

	rdtp->dyntick_holdoff = jiffies - 1;
	setup_timer(&rdtp->idle_gp_timer, rcu_idle_gp_timer_func, cpu);
	rdtp->idle_gp_timer_expires = jiffies - 1;
	rdtp->idle_first_pass = 1;

static void rcu_cleanup_after_idle(int cpu)

	rdtp->tick_nohz_enabled_snap = ACCESS_ONCE(tick_nohz_enabled);
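/*
 * Called on entry to idle: repeatedly try to advance and invoke this CPU's
 * callbacks, and arm a timer to force a grace period if callbacks remain
 * when the CPU finally goes dyntick-idle.
 */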
static void rcu_prepare_for_idle(int cpu)

	if (tne != rdtp->tick_nohz_enabled_snap) {
		if (rcu_cpu_has_callbacks(cpu))
		rdtp->tick_nohz_enabled_snap = tne;

		rdtp->dyntick_holdoff = jiffies - 1;
		if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
			rdtp->idle_gp_timer_expires =
				round_up(jiffies + RCU_IDLE_GP_DELAY,
		} else if (rcu_cpu_has_callbacks(cpu)) {
			rdtp->idle_gp_timer_expires =
		tp = &rdtp->idle_gp_timer;

	if (!rdtp->idle_first_pass &&
	    (rdtp->nonlazy_posted == rdtp->nonlazy_posted_snap)) {
		if (rcu_cpu_has_callbacks(cpu)) {
			tp = &rdtp->idle_gp_timer;

	rdtp->idle_first_pass = 0;
	rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted - 1;

	if (!rcu_cpu_has_callbacks(cpu)) {
		rdtp->dyntick_holdoff = jiffies - 1;
		rdtp->dyntick_drain = 0;

	if (rdtp->dyntick_holdoff == jiffies) {

	if (rdtp->dyntick_drain <= 0) {
		rdtp->dyntick_drain = RCU_IDLE_FLUSHES;
	} else if (rdtp->dyntick_drain <= RCU_IDLE_OPT_FLUSHES &&
		   !rcu_pending(cpu) &&
		rdtp->dyntick_drain = 0;
		rdtp->dyntick_holdoff = jiffies;
		if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
			rdtp->idle_gp_timer_expires =
				round_up(jiffies + RCU_IDLE_GP_DELAY,
			rdtp->idle_gp_timer_expires =
		tp = &rdtp->idle_gp_timer;
		rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
	} else if (--(rdtp->dyntick_drain) <= 0) {
		rdtp->dyntick_holdoff = jiffies;

#ifdef CONFIG_TREE_PREEMPT_RCU
	if (per_cpu(rcu_preempt_data, cpu).nxtlist) {
		rcu_preempt_qs(cpu);
		force_quiescent_state(&rcu_preempt_state);
	if (per_cpu(rcu_sched_data, cpu).nxtlist) {
	if (per_cpu(rcu_bh_data, cpu).nxtlist) {

	if (rcu_cpu_has_callbacks(cpu)) {

static void rcu_idle_count_callbacks_posted(void)
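/*
 * Out-of-memory notifier: post a non-lazy callback on each CPU so that any
 * lazy callbacks (and the memory they pin) are processed promptly.
 */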
static atomic_t oom_callback_count;

static void rcu_oom_callback(struct rcu_head *rhp)

static void rcu_oom_notify_cpu(void *unused)
	struct rcu_state *rsp;

		rsp->call(&rdp->oom_head, rcu_oom_callback);

			  unsigned long notused, void *nfreed)

static int __init rcu_register_oom_notifier(void)

#ifdef CONFIG_RCU_CPU_STALL_INFO
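/*
 * Helpers for printing per-CPU detail in RCU CPU stall warnings
 * (CONFIG_RCU_CPU_STALL_INFO), including dyntick-idle timer state.
 */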
#ifdef CONFIG_RCU_FAST_NO_HZ

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)

	struct timer_list *tltp = &rdtp->idle_gp_timer;

	c = rdtp->dyntick_holdoff == jiffies ? 'H' : '.';
	if (timer_pending(tltp))
		sprintf(cp, "drain=%d %c timer=%lu",
			rdtp->dyntick_drain, c, tltp->expires - jiffies);
		sprintf(cp, "drain=%d %c timer not pending",
			rdtp->dyntick_drain, c);

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)

static void print_cpu_stall_info_begin(void)

static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
	char fast_no_hz[72];

	unsigned long ticks_value;

		ticks_title = "ticks this GP";
		ticks_value = rdp->ticks_this_gp;
		ticks_title = "GPs behind";

	print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
	       cpu, ticks_value, ticks_title,

static void print_cpu_stall_info_end(void)

static void zero_cpu_stall_ticks(struct rcu_data *rdp)
	rdp->ticks_this_gp = 0;

static void increment_cpu_stall_ticks(void)
	struct rcu_state *rsp;

static void print_cpu_stall_info_begin(void)

static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)

static void print_cpu_stall_info_end(void)

static void zero_cpu_stall_ticks(struct rcu_data *rdp)

static void increment_cpu_stall_ticks(void)