#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/export.h>
#include <linux/time.h>
#include <linux/wait.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#define RCU_STATE_INITIALIZER(sname, cr) { \
	.level = { &sname##_state.node[0] }, \
	.fqs_state = RCU_GP_IDLE, \
	.onofflock = __RAW_SPIN_LOCK_UNLOCKED(&sname##_state.onofflock), \
	.orphan_nxttail = &sname##_state.orphan_nxtlist, \
	.orphan_donetail = &sname##_state.orphan_donelist, \
	.barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
	.onoff_mutex = __MUTEX_INITIALIZER(sname##_state.onoff_mutex), \
static int rcu_fanout_leaf = CONFIG_RCU_FANOUT_LEAF;
static int num_rcu_lvl[] = {
#ifdef CONFIG_RCU_BOOST
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
static void invoke_rcu_core(void);
static int rcu_gp_in_progress(struct rcu_state *rsp)
	trace_rcu_utilization("Start context switch");
	trace_rcu_utilization("End context switch");
#if defined(CONFIG_RCU_USER_QS) && !defined(CONFIG_RCU_USER_QS_FORCE)
	.ignore_user_qs = true,
static long qhimark = 10000;
static long qlowmark = 100;
int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *));
static void force_quiescent_state(struct rcu_state *rsp);
static int rcu_pending(int cpu);
	force_quiescent_state(&rcu_bh_state);
	force_quiescent_state(&rcu_sched_state);
cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
	       !rcu_gp_in_progress(rsp);
static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
	return &rsp->node[0];
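/*
 * rcu_eqs_enter_common() records this CPU's entry into an extended
 * quiescent state (idle, or userspace execution under CONFIG_RCU_USER_QS).
 * The excerpted checks warn when a task that is neither idle nor a
 * user-mode task performs the entry, and when the CPU is still inside an
 * RCU, RCU-bh, or RCU-sched read-side critical section.
 */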
static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
	if (!user && !is_idle_task(current)) {
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
		  "Illegal idle entry in RCU read-side critical section.");
		  "Illegal idle entry in RCU-bh read-side critical section.");
		  "Illegal idle entry in RCU-sched read-side critical section.");
static void rcu_eqs_enter(bool user)
	rcu_eqs_enter_common(rdtp, oldval, user);
	rcu_eqs_enter(false);
#ifdef CONFIG_RCU_USER_QS
void rcu_user_enter(void)
	if (!rdtp->ignore_user_qs && !rdtp->in_user) {
		rdtp->in_user = true;
void rcu_user_enter_after_irq(void)
	rcu_eqs_enter_common(rdtp, oldval, true);
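/*
 * rcu_eqs_exit_common() is the mirror of rcu_eqs_enter_common(): it marks
 * the CPU's return from an extended quiescent state and, as in the entry
 * path, warns if the exiting task is neither the idle task nor a
 * user-mode task.
 */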
static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
	if (!user && !is_idle_task(current)) {
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
static void rcu_eqs_exit(bool user)
	rcu_eqs_exit_common(rdtp, oldval, user);
#ifdef CONFIG_RCU_USER_QS
void rcu_user_exit(void)
	rdtp->in_user = false;
void rcu_user_exit_after_irq(void)
	rcu_eqs_exit_common(rdtp, oldval, true);
#ifdef CONFIG_RCU_USER_QS
	if (!rdtp->ignore_user_qs) {
		clear_tsk_thread_flag(prev, TIF_NOHZ);
		set_tsk_thread_flag(next, TIF_NOHZ);
#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
bool rcu_lockdep_current_cpu_online(void)
	       !rcu_scheduler_fully_active;
static int dyntick_save_progress_counter(struct rcu_data *rdp)
static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
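/*
 * jiffies_till_stall_check() converts the rcu_cpu_stall_timeout module
 * parameter (in seconds) into a stall-check interval, clamping it to the
 * [3, 300] second range visible in the excerpt below.
 */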
static int jiffies_till_stall_check(void)
	int till_stall_check = ACCESS_ONCE(rcu_cpu_stall_timeout);
	if (till_stall_check < 3) {
		till_stall_check = 3;
	} else if (till_stall_check > 300) {
		till_stall_check = 300;
static void record_gp_stall_check_time(struct rcu_state *rsp)
static void print_other_cpu_stall(struct rcu_state *rsp)
	struct rcu_node *rnp = rcu_get_root(rsp);
	print_cpu_stall_info_begin();
		ndetected += rcu_print_task_stall(rnp);
		for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
			print_cpu_stall_info(rsp,
	rnp = rcu_get_root(rsp);
	ndetected += rcu_print_task_stall(rnp);
	print_cpu_stall_info_end();
	else if (!trigger_all_cpu_backtrace())
		rcu_print_detail_task_stall(rsp);
	force_quiescent_state(rsp);
static void print_cpu_stall(struct rcu_state *rsp)
	struct rcu_node *rnp = rcu_get_root(rsp);
	print_cpu_stall_info_begin();
	print_cpu_stall_info_end();
	if (!trigger_all_cpu_backtrace())
		     3 * jiffies_till_stall_check() + 3;
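/*
 * check_cpu_stall() is called from the per-CPU pending checks: unless
 * stall warnings are suppressed, it reports a stall either on the current
 * CPU (print_cpu_stall()) or on some other CPU (print_other_cpu_stall())
 * when a grace period has run for too long.
 */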
static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
	if (rcu_cpu_stall_suppress)
	if (rcu_gp_in_progress(rsp) &&
		print_cpu_stall(rsp);
	} else if (rcu_gp_in_progress(rsp) &&
		print_other_cpu_stall(rsp);
	rcu_cpu_stall_suppress = 1;
	struct rcu_state *rsp;
	.notifier_call = rcu_panic,
static void __init check_cpu_stall_init(void)
static void __note_new_gpnum(struct rcu_state *rsp, struct rcu_node *rnp,
			     struct rcu_data *rdp)
	zero_cpu_stall_ticks(rdp);
static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp)
	unsigned long flags;
	__note_new_gpnum(rsp, rnp, rdp);
check_for_new_grace_period(struct rcu_state *rsp, struct rcu_data *rdp)
	unsigned long flags;
	note_new_gpnum(rsp, rdp);
static void init_callback_list(struct rcu_data *rdp)
__rcu_process_gp_end(struct rcu_state *rsp, struct rcu_node *rnp,
		     struct rcu_data *rdp)
rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp)
	unsigned long flags;
	__rcu_process_gp_end(rsp, rnp, rdp);
rcu_start_gp_per_cpu(struct rcu_state *rsp, struct rcu_node *rnp,
		     struct rcu_data *rdp)
	__rcu_process_gp_end(rsp, rnp, rdp);
	__note_new_gpnum(rsp, rnp, rdp);
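/*
 * rcu_gp_init() performs grace-period initialization from the grace-period
 * kthread: it declines to start a new grace period while one is already in
 * progress, records the stall-check time, and then walks the rcu_node tree
 * setting up each node (and this CPU's rcu_data) for the new grace period.
 */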
static int rcu_gp_init(struct rcu_state *rsp)
	struct rcu_node *rnp = rcu_get_root(rsp);
	if (rcu_gp_in_progress(rsp)) {
	record_gp_stall_check_time(rsp);
		rcu_preempt_check_blocked_tasks(rnp);
		rcu_start_gp_per_cpu(rsp, rnp, rdp);
		rcu_preempt_boost_start_gp(rnp);
#ifdef CONFIG_PROVE_RCU_DELAY
		if ((random32() % (rcu_num_nodes * 8)) == 0)
	int fqs_state = fqs_state_in;
	struct rcu_node *rnp = rcu_get_root(rsp);
		force_qs_rnp(rsp, dyntick_save_progress_counter);
		force_qs_rnp(rsp, rcu_implicit_dynticks_qs);
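/*
 * rcu_gp_cleanup() runs at the end of a grace period: it updates the
 * maximum observed grace-period duration (rsp->gp_max) and, if callbacks
 * still need another grace period, arranges for one to be started.
 */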
static void rcu_gp_cleanup(struct rcu_state *rsp)
	unsigned long gp_duration;
	struct rcu_node *rnp = rcu_get_root(rsp);
	if (gp_duration > rsp->gp_max)
		rsp->gp_max = gp_duration;
	rnp = rcu_get_root(rsp);
	if (cpu_needs_another_gp(rsp, rdp))
	struct rcu_state *rsp = arg;
	struct rcu_node *rnp = rcu_get_root(rsp);
		j = jiffies_till_first_fqs;
			jiffies_till_first_fqs = HZ;
			     !rcu_preempt_blocked_readers_cgp(rnp)),
			     !rcu_preempt_blocked_readers_cgp(rnp))
			j = jiffies_till_next_fqs;
				jiffies_till_next_fqs = HZ;
				jiffies_till_next_fqs = 1;
	rcu_gp_cleanup(rsp);
rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
	struct rcu_node *rnp = rcu_get_root(rsp);
	    !cpu_needs_another_gp(rsp, rdp)) {
static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
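/*
 * rcu_report_qs_rnp() reports a quiescent state for the CPUs in "mask" up
 * the rcu_node hierarchy.  Propagation stops at any node that still has
 * unreported CPUs or blocked preemptible-RCU readers; once the root node
 * is cleared, rcu_report_qs_rsp() ends the current grace period.
 */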
rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
		  struct rcu_node *rnp, unsigned long flags)
		if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
	rcu_report_qs_rsp(rsp, flags);
rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
	unsigned long flags;
	if ((rnp->qsmask & mask) == 0) {
		rcu_report_qs_rnp(mask, rsp, rnp, flags);
rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
	if (check_for_new_grace_period(rsp, rdp))
	rcu_report_qs_rdp(rdp->cpu, rsp, rdp);
#ifdef CONFIG_HOTPLUG_CPU
rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
	init_callback_list(rdp);
static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
	rcu_idle_count_callbacks_posted();
static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
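/*
 * rcu_cleanup_dead_cpu() handles the CPU_DEAD notification: it rebinds any
 * boost kthread, moves the dead CPU's callbacks to the orphanage and adopts
 * them, migrates blocked tasks up the rcu_node tree, and warns if callbacks
 * somehow remain queued on the now-offline CPU.
 */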
static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
	unsigned long flags;
	int need_report = 0;
	rcu_boost_kthread_setaffinity(rnp, -1);
	rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp);
	rcu_adopt_orphan_cbs(rsp);
			need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp);
	} while (rnp != NULL);
		rcu_report_unblock_qs_rnp(rnp, flags);
		rcu_report_exp_rnp(rsp, rnp, true);
		  "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, nxtlist=%p\n",
	init_callback_list(rdp);
static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
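/*
 * rcu_do_batch() invokes this CPU's ready-to-invoke callbacks, stopping
 * once the batch limit "bl" has been reached, except that (per the
 * excerpted condition) an idle CPU or the dedicated callbacks kthread is
 * allowed to keep going.
 */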
static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
	unsigned long flags;
	if (!cpu_has_callbacks_ready_to_invoke(rdp)) {
				    need_resched(), is_idle_task(current),
				    rcu_is_callbacks_kthread());
	count = count_lazy = 0;
		debug_rcu_head_unqueue(list);
		if (__rcu_reclaim(rsp->name, list))
		if (++count >= bl &&
		    (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
			    rcu_is_callbacks_kthread());
	if (cpu_has_callbacks_ready_to_invoke(rdp))
	trace_rcu_utilization("Start scheduler-tick");
	increment_cpu_stall_ticks();
	rcu_preempt_check_callbacks(cpu);
	if (rcu_pending(cpu))
	trace_rcu_utilization("End scheduler-tick");
static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *))
	unsigned long flags;
		if (!rcu_gp_in_progress(rsp)) {
			rcu_initiate_boost(rnp, flags);
		for (; cpu <= rnp->grphi; cpu++, bit <<= 1) {
			if ((rnp->qsmask & bit) != 0 &&
			rcu_report_qs_rnp(mask, rsp, rnp, flags);
	rnp = rcu_get_root(rsp);
		rcu_initiate_boost(rnp, flags);
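/*
 * force_quiescent_state() expedites the current grace period; based on the
 * excerpted lines it walks from this CPU's leaf rcu_node toward the root,
 * and the "rnp_old != NULL" check below releases the previously held node
 * when another CPU is already doing the forcing.
 */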
static void force_quiescent_state(struct rcu_state *rsp)
	unsigned long flags;
		if (rnp_old != NULL)
__rcu_process_callbacks(struct rcu_state *rsp)
	unsigned long flags;
	rcu_process_gp_end(rsp, rdp);
	rcu_check_quiescent_state(rsp, rdp);
	if (cpu_needs_another_gp(rsp, rdp)) {
		rcu_start_gp(rsp, flags);
	if (cpu_has_callbacks_ready_to_invoke(rdp))
	struct rcu_state *rsp;
	trace_rcu_utilization("Start RCU core");
		__rcu_process_callbacks(rsp);
	trace_rcu_utilization("End RCU core");
	if (likely(!rsp->boost)) {
		rcu_do_batch(rsp, rdp);
	invoke_rcu_callbacks_kthread();
static void invoke_rcu_core(void)
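/*
 * __call_rcu_core() does the post-enqueue bookkeeping for __call_rcu():
 * when too many callbacks have piled up on this CPU, it tries to advance
 * the grace-period machinery, starting a new grace period if none is in
 * progress and forcing quiescent states as a last resort.
 */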
static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
		rcu_process_gp_end(rsp, rdp);
		check_for_new_grace_period(rsp, rdp);
		if (!rcu_gp_in_progress(rsp)) {
			unsigned long nestflag;
			struct rcu_node *rnp_root = rcu_get_root(rsp);
			rcu_start_gp(rsp, nestflag);
			force_quiescent_state(rsp);
	   struct rcu_state *rsp, bool lazy)
	unsigned long flags;
	debug_rcu_head_queue(head);
		rcu_idle_count_callbacks_posted();
	__call_rcu_core(rsp, rdp, head, flags);
	__call_rcu(head, func, &rcu_sched_state, 0);
	__call_rcu(head, func, &rcu_bh_state, 0);
static inline int rcu_blocking_is_gp(void)
			   "Illegal synchronize_sched() in RCU-sched read-side critical section");
	if (rcu_blocking_is_gp())
			   "Illegal synchronize_rcu_bh() in RCU-bh read-side critical section");
	if (rcu_blocking_is_gp())
static int synchronize_sched_expedited_cpu_stop(void *data)
	int firstsnap, s, snap, trycount = 0;
			     synchronize_sched_expedited_cpu_stop,
		if (trycount++ < 10) {
		if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
		snap = atomic_read(&sync_sched_expedited_started);
	} while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
	check_cpu_stall(rsp, rdp);
	if (rcu_scheduler_fully_active &&
	if (cpu_has_callbacks_ready_to_invoke(rdp)) {
	if (cpu_needs_another_gp(rsp, rdp)) {
static int rcu_pending(int cpu)
	struct rcu_state *rsp;
static int rcu_cpu_has_callbacks(int cpu)
	struct rcu_state *rsp;
static void _rcu_barrier_trace(struct rcu_state *rsp, char *s,
			       int cpu, unsigned long done)
static void rcu_barrier_callback(struct rcu_head *rhp)
	struct rcu_state *rsp = rdp->rsp;
static void rcu_barrier_func(void *type)
	struct rcu_state *rsp = type;
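/*
 * _rcu_barrier() waits until all callbacks previously queued on any CPU
 * for the given RCU flavor have been invoked.  The excerpted trace points
 * record the begin/check/early-exit decisions and whether each online CPU
 * had callbacks queued ("OnlineQ") or not ("OnlineNQ").
 */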
static void _rcu_barrier(struct rcu_state *rsp)
	unsigned long snap_done;
	_rcu_barrier_trace(rsp, "Begin", -1, snap);
	_rcu_barrier_trace(rsp, "Check", -1, snap_done);
	if (ULONG_CMP_GE(snap_done, ((snap + 1) & ~0x1) + 2)) {
		_rcu_barrier_trace(rsp, "EarlyExit", -1, snap_done);
			_rcu_barrier_trace(rsp, "OnlineQ", cpu,
			_rcu_barrier_trace(rsp, "OnlineNQ", cpu,
	_rcu_barrier(&rcu_bh_state);
	_rcu_barrier(&rcu_sched_state);
rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
	unsigned long flags;
	struct rcu_node *rnp = rcu_get_root(rsp);
	init_callback_list(rdp);
#ifdef CONFIG_RCU_USER_QS
rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
	unsigned long flags;
	struct rcu_node *rnp = rcu_get_root(rsp);
	init_callback_list(rdp);
	rcu_prepare_for_idle_init(cpu);
		if (rnp == rdp->mynode) {
static void __cpuinit rcu_prepare_cpu(int cpu)
	struct rcu_state *rsp;
		rcu_init_percpu_data(cpu, rsp,
				      unsigned long action, void *hcpu)
	long cpu = (long)hcpu;
	struct rcu_state *rsp;
	trace_rcu_utilization("Start CPU hotplug");
		rcu_prepare_cpu(cpu);
		rcu_prepare_kthreads(cpu);
		rcu_boost_kthread_setaffinity(rnp, -1);
		rcu_boost_kthread_setaffinity(rnp, cpu);
			rcu_cleanup_dying_cpu(rsp);
		rcu_cleanup_after_idle(cpu);
			rcu_cleanup_dead_cpu(cpu, rsp);
	trace_rcu_utilization("End CPU hotplug");
static int __init rcu_spawn_gp_kthread(void)
	unsigned long flags;
	struct rcu_state *rsp;
		rnp = rcu_get_root(rsp);
	rcu_scheduler_active = 1;
#ifdef CONFIG_RCU_FANOUT_EXACT
static void __init rcu_init_levelspread(struct rcu_state *rsp)
	for (i = rcu_num_lvls - 1; i > 0; i--)
static void __init rcu_init_levelspread(struct rcu_state *rsp)
	for (i = rcu_num_lvls - 1; i >= 0; i--) {
static void __init rcu_init_one(struct rcu_state *rsp,
	static char *buf[] = { "rcu_node_0",
	static char *fqs[] = { "rcu_node_fqs_0",
	for (i = 1; i < rcu_num_lvls; i++)
	rcu_init_levelspread(rsp);
	for (i = rcu_num_lvls - 1; i >= 0; i--) {
		for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) {
						   &rcu_node_class[i], buf[i]);
						   &rcu_fqs_class[i], fqs[i]);
			rnp->grplo = j * cpustride;
			rnp->grphi = (j + 1) * cpustride - 1;
	rnp = rsp->level[rcu_num_lvls - 1];
		while (i > rnp->grphi)
		rcu_boot_init_percpu_data(i, rsp);
	list_add(&rsp->flavors, &rcu_struct_flavors);
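/*
 * rcu_init_geometry() recomputes the rcu_node tree geometry when the
 * rcu_fanout_leaf boot parameter differs from CONFIG_RCU_FANOUT_LEAF,
 * building the rcu_capacity[] table of how many CPUs each tree depth can
 * support and summing num_rcu_lvl[] into rcu_num_nodes.
 */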
static void __init rcu_init_geometry(void)
	if (rcu_fanout_leaf == CONFIG_RCU_FANOUT_LEAF &&
	rcu_capacity[0] = 1;
	rcu_capacity[1] = rcu_fanout_leaf;
		rcu_capacity[i] = rcu_capacity[i - 1] * CONFIG_RCU_FANOUT;
	if (rcu_fanout_leaf < CONFIG_RCU_FANOUT_LEAF ||
	    rcu_fanout_leaf > sizeof(unsigned long) * 8 ||
		if (n <= rcu_capacity[i]) {
			for (j = 0; j <= i; j++)
		rcu_num_nodes += num_rcu_lvl[i];
	rcu_bootup_announce();
	rcu_init_geometry();
	rcu_init_one(&rcu_sched_state, &rcu_sched_data);
	rcu_init_one(&rcu_bh_state, &rcu_bh_data);
	__rcu_init_preempt();
	check_cpu_stall_init();