#include <linux/module.h>

#ifdef CONFIG_DEBUG_LOCK_ALLOC

#ifdef CONFIG_TINY_PREEMPT_RCU
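
/*
 * Global control block for preemptible Tiny RCU: the callback lists,
 * the list of tasks that blocked within an RCU read-side critical
 * section, grace-period numbering, and (optionally) priority-boosting
 * and tracing state.
 */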
struct rcu_preempt_ctrlblk {

#ifdef CONFIG_RCU_BOOST

#ifdef CONFIG_RCU_BOOST
	unsigned long boost_time;	/* When (in jiffies) boosting may start. */

#ifdef CONFIG_RCU_TRACE
	unsigned long n_grace_periods;	/* Number of grace periods started. */
#ifdef CONFIG_RCU_BOOST
	unsigned long n_tasks_boosted;	/* Total number of tasks boosted. */
	unsigned long n_exp_boosts;	/* Boosts on behalf of expedited GPs. */
	unsigned long n_normal_boosts;	/* Boosts on behalf of normal GPs. */
	unsigned long n_balk_blkd_tasks;	/* Refused to boost: no blocked tasks. */
	unsigned long n_balk_exp_gp_tasks;	/* Refused to boost: nothing blocking a GP. */
	unsigned long n_balk_boost_tasks;	/* Refused to boost: already boosting. */
	unsigned long n_balk_notyet;	/* Refused to boost: ->boost_time not reached. */
	unsigned long n_balk_nos;	/* Refused to boost: other reasons. */

static struct rcu_preempt_ctrlblk rcu_preempt_ctrlblk = {
	.rcb.donetail = &rcu_preempt_ctrlblk.rcb.rcucblist,
	.rcb.curtail = &rcu_preempt_ctrlblk.rcb.rcucblist,
	.nexttail = &rcu_preempt_ctrlblk.rcb.rcucblist,

static int rcu_preempted_readers_exp(void);
static void rcu_report_exp_done(void);
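
/*
 * Is this CPU still blocking the current grace period, that is, has it
 * not yet passed through a quiescent state for it?
 */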
static int rcu_cpu_blocking_cur_gp(void)
{
	return rcu_preempt_ctrlblk.gpcpu != rcu_preempt_ctrlblk.gpnum;
}
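
/* Return the rcu_read_lock() nesting depth of the currently running task. */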
static int rcu_preempt_running_reader(void)
{
	return current->rcu_read_lock_nesting;
}
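
/* Are any tasks queued as having blocked within an RCU read-side critical section? */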
static int rcu_preempt_blocked_readers_any(void)
{
	return !list_empty(&rcu_preempt_ctrlblk.blkd_tasks);
}
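
/* Are there blocked readers still holding up the current grace period? */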
static int rcu_preempt_blocked_readers_cgp(void)
{
	return rcu_preempt_ctrlblk.gp_tasks != NULL;
}
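
/* Are callbacks queued that need a grace period beyond the current one? */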
static int rcu_preempt_needs_another_gp(void)
{
	return *rcu_preempt_ctrlblk.rcb.curtail != NULL;
}
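
/* Is a grace period currently in progress? */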
static int rcu_preempt_gp_in_progress(void)
{
	return rcu_preempt_ctrlblk.completed != rcu_preempt_ctrlblk.gpnum;
}
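
/*
 * Advance to the task following t on ->blkd_tasks; reaching the list
 * head means there is no such task.  (This is the core of
 * rcu_next_node_entry(), used by rcu_read_unlock_special() below.)
 */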
	np = t->rcu_node_entry.next;
	if (np == &rcu_preempt_ctrlblk.blkd_tasks)

#ifdef CONFIG_RCU_TRACE

#ifdef CONFIG_RCU_BOOST
static void rcu_initiate_boost_trace(void);
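
/*
 * Dump the rcu_preempt_ctrlblk statistics (callback-queue length,
 * grace-period numbers, blocked-task state and, with RCU_BOOST, the
 * boosting/balk counters) to the given seq_file.
 */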
static void show_tiny_preempt_stats(struct seq_file *m)
{
	seq_printf(m, "rcu_preempt: qlen=%ld gp=%lu g%u/p%u/c%u tasks=%c%c%c\n",
		   rcu_preempt_ctrlblk.rcb.qlen,
		   rcu_preempt_ctrlblk.n_grace_periods,
		   rcu_preempt_ctrlblk.gpnum,
		   rcu_preempt_ctrlblk.gpcpu,
		   rcu_preempt_ctrlblk.completed,
		   "T."[list_empty(&rcu_preempt_ctrlblk.blkd_tasks)],
		   "N."[!rcu_preempt_ctrlblk.gp_tasks],
		   "E."[!rcu_preempt_ctrlblk.exp_tasks]);
#ifdef CONFIG_RCU_BOOST
	seq_printf(m, "%sttb=%c ntb=%lu neb=%lu nnb=%lu j=%04x bt=%04x\n",

		   "B."[!rcu_preempt_ctrlblk.boost_tasks],
		   rcu_preempt_ctrlblk.n_tasks_boosted,
		   rcu_preempt_ctrlblk.n_exp_boosts,
		   rcu_preempt_ctrlblk.n_normal_boosts,
		   (int)(jiffies & 0xffff),
		   (int)(rcu_preempt_ctrlblk.boost_time & 0xffff));
	seq_printf(m, "%s: nt=%lu egt=%lu bt=%lu ny=%lu nos=%lu\n",

		   rcu_preempt_ctrlblk.n_balk_blkd_tasks,
		   rcu_preempt_ctrlblk.n_balk_exp_gp_tasks,
		   rcu_preempt_ctrlblk.n_balk_boost_tasks,
		   rcu_preempt_ctrlblk.n_balk_notyet,
		   rcu_preempt_ctrlblk.n_balk_nos);

#ifdef CONFIG_RCU_BOOST

#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO

static unsigned long have_rcu_kthread_work;
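
/*
 * Priority-boost the first reader blocking the expedited grace period
 * if there is one, otherwise the first reader blocking the normal
 * grace period; the return value feeds the "morework" flag used by
 * rcu_kthread() below.
 */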
static int rcu_boost(void)
{

	if (rcu_preempt_ctrlblk.boost_tasks == NULL &&
	    rcu_preempt_ctrlblk.exp_tasks == NULL)

	if (rcu_preempt_ctrlblk.boost_tasks == NULL &&
	    rcu_preempt_ctrlblk.exp_tasks == NULL) {

	if (rcu_preempt_ctrlblk.exp_tasks != NULL) {
		tb = rcu_preempt_ctrlblk.exp_tasks;
		RCU_TRACE(rcu_preempt_ctrlblk.n_exp_boosts++);
	} else {
		tb = rcu_preempt_ctrlblk.boost_tasks;
		RCU_TRACE(rcu_preempt_ctrlblk.n_normal_boosts++);
	}
	RCU_TRACE(rcu_preempt_ctrlblk.n_tasks_boosted++);

	t->rcu_boost_mutex = &mtx;
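
/*
 * Decide whether it is time to start boosting: expedited blockers are
 * boosted immediately, normal-GP blockers only once ->boost_time has
 * passed.  When tracing is enabled, the reasons for declining are
 * counted (see rcu_initiate_boost_trace()).
 */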
static int rcu_initiate_boost(void)
{
	if (!rcu_preempt_blocked_readers_cgp() &&
	    rcu_preempt_ctrlblk.exp_tasks == NULL) {
		RCU_TRACE(rcu_preempt_ctrlblk.n_balk_exp_gp_tasks++);

	}
	if (rcu_preempt_ctrlblk.exp_tasks != NULL ||
	    (rcu_preempt_ctrlblk.gp_tasks != NULL &&
	     rcu_preempt_ctrlblk.boost_tasks == NULL &&
	     ULONG_CMP_GE(jiffies, rcu_preempt_ctrlblk.boost_time))) {
		if (rcu_preempt_ctrlblk.exp_tasks == NULL)
			rcu_preempt_ctrlblk.boost_tasks =
				rcu_preempt_ctrlblk.gp_tasks;

#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)
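
/* Record when boosting may begin for the grace period that is starting. */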
static void rcu_preempt_boost_start_gp(void)
{
	rcu_preempt_ctrlblk.boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
}
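
/* Without CONFIG_RCU_BOOST, the boosting hooks reduce to the stubs below. */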
static int rcu_initiate_boost(void)
{
	return rcu_preempt_blocked_readers_cgp();
}

static void rcu_preempt_boost_start_gp(void)
{
}
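
/*
 * Record a quiescent state for the current grace period, and, if this
 * completes the grace period, advance the callback lists so that the
 * newly "done" callbacks can be invoked.
 */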
static void rcu_preempt_cpu_qs(void)
{
	/* Record both CPU and task as having responded to the current GP. */
	rcu_preempt_ctrlblk.gpcpu = rcu_preempt_ctrlblk.gpnum;
	current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;

	/* If there is no grace period in progress, there is nothing more to do. */
	if (!rcu_preempt_gp_in_progress())

	/* Check up on priority boosting. */
	if (rcu_initiate_boost())

	/* Advance callbacks. */
	rcu_preempt_ctrlblk.completed = rcu_preempt_ctrlblk.gpnum;
	rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.rcb.curtail;
	rcu_preempt_ctrlblk.rcb.curtail = rcu_preempt_ctrlblk.nexttail;

	/* If there are no blocked readers, the next grace period is done at once. */
	if (!rcu_preempt_blocked_readers_any())
		rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.nexttail;

	/* If callbacks are now ready to invoke, have them invoked. */
	if (*rcu_preempt_ctrlblk.rcb.donetail != NULL)
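
/* Start a new grace period if one is needed and none is in progress. */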
static void rcu_preempt_start_gp(void)
{
	if (!rcu_preempt_gp_in_progress() && rcu_preempt_needs_another_gp()) {

		/* Official start of the grace period. */
		rcu_preempt_ctrlblk.gpnum++;
		RCU_TRACE(rcu_preempt_ctrlblk.n_grace_periods++);

		/* Any blocked RCU readers block the new grace period. */
		if (rcu_preempt_blocked_readers_any())
			rcu_preempt_ctrlblk.gp_tasks =
				rcu_preempt_ctrlblk.blkd_tasks.next;

		/* Set up for RCU priority boosting. */
		rcu_preempt_boost_start_gp();

		/* If there is no running reader, the CPU is done with the GP. */
		if (!rcu_preempt_running_reader())
			rcu_preempt_cpu_qs();
	}
}
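
/*
 * Context-switch handling (presumably rcu_preempt_note_context_switch()):
 * a task preempted within an RCU read-side critical section is queued on
 * ->blkd_tasks so that the grace period waits for it.
 */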
	if (rcu_preempt_running_reader() > 0 &&
	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;

		list_add(&t->rcu_node_entry, &rcu_preempt_ctrlblk.blkd_tasks);
		if (rcu_cpu_blocking_cur_gp())
			rcu_preempt_ctrlblk.gp_tasks = &t->rcu_node_entry;
	} else if (rcu_preempt_running_reader() < 0 &&
		   t->rcu_read_unlock_special) {

		rcu_read_unlock_special(t);

	rcu_preempt_cpu_qs();
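
/*
 * Handle the special cases of rcu_read_unlock(): report a deferred
 * quiescent state, remove the task from ->blkd_tasks (fixing up
 * ->gp_tasks, ->exp_tasks and ->boost_tasks), end any grace period the
 * task was the last reader blocking, and release any boost mutex.
 */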
void rcu_read_unlock_special(struct task_struct *t)
{

#ifdef CONFIG_RCU_BOOST

	special = t->rcu_read_unlock_special;
	if (special & RCU_READ_UNLOCK_NEED_QS)
		rcu_preempt_cpu_qs();

	/* Clean up if the task blocked during its RCU read-side critical section. */
	if (special & RCU_READ_UNLOCK_BLOCKED) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

		/*
		 * Remove this task from the list of blocked tasks, fixing up
		 * the list pointers that may reference it.
		 */
		empty = !rcu_preempt_blocked_readers_cgp();
		empty_exp = rcu_preempt_ctrlblk.exp_tasks == NULL;
		np = rcu_next_node_entry(t);
		list_del_init(&t->rcu_node_entry);
		if (&t->rcu_node_entry == rcu_preempt_ctrlblk.gp_tasks)
			rcu_preempt_ctrlblk.gp_tasks = np;
		if (&t->rcu_node_entry == rcu_preempt_ctrlblk.exp_tasks)
			rcu_preempt_ctrlblk.exp_tasks = np;
#ifdef CONFIG_RCU_BOOST
		if (&t->rcu_node_entry == rcu_preempt_ctrlblk.boost_tasks)
			rcu_preempt_ctrlblk.boost_tasks = np;

		/* If this was the last reader blocking the current GP, end it. */
		if (!empty && !rcu_preempt_blocked_readers_cgp()) {
			rcu_preempt_cpu_qs();
			rcu_preempt_start_gp();
		}

		/* If this was the last reader blocking the expedited GP, report it. */
		if (!empty_exp && rcu_preempt_ctrlblk.exp_tasks == NULL)
			rcu_report_exp_done();

#ifdef CONFIG_RCU_BOOST
		/* Unboost if we were boosted. */
		if (t->rcu_boost_mutex != NULL) {
			rbmp = t->rcu_boost_mutex;
			t->rcu_boost_mutex = NULL;
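
/*
 * Check for a quiescent state from the current CPU's viewpoint: report
 * one when it is safe to do so, and otherwise ask a running reader to
 * report it at the next rcu_read_unlock().
 */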
static void rcu_preempt_check_callbacks(void)
{

	if (rcu_preempt_gp_in_progress() &&
	    (!rcu_preempt_running_reader() ||
	     !rcu_cpu_blocking_cur_gp()))
		rcu_preempt_cpu_qs();
	if (&rcu_preempt_ctrlblk.rcb.rcucblist !=
	    rcu_preempt_ctrlblk.rcb.donetail)

	if (rcu_preempt_gp_in_progress() &&
	    rcu_cpu_blocking_cur_gp() &&
	    rcu_preempt_running_reader() > 0)
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
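
/* Fix up ->nexttail when the done callbacks are removed from the list. */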
static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp)
{
	if (rcu_preempt_ctrlblk.nexttail == rcp->donetail)
		rcu_preempt_ctrlblk.nexttail = &rcp->rcucblist;
}
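
/* Invoke the preemptible-RCU callbacks whose grace period has elapsed. */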
static void rcu_preempt_process_callbacks(void)
{
	__rcu_process_callbacks(&rcu_preempt_ctrlblk.rcb);
}
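
/*
 * Callback enqueue (the call_rcu() path for preemptible Tiny RCU):
 * add the new callback at ->nexttail and start a grace period if none
 * is in progress.
 */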
	debug_rcu_head_queue(head);

	*rcu_preempt_ctrlblk.nexttail = head;
	rcu_preempt_ctrlblk.nexttail = &head->next;
	RCU_TRACE(rcu_preempt_ctrlblk.rcb.qlen++);
	rcu_preempt_start_gp();
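
/*
 * Wait for a full preemptible-RCU grace period; the fast path applies
 * when no readers are queued as blocked.
 */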
void synchronize_rcu(void)
{

			   "Illegal synchronize_rcu() in RCU read-side critical section");

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (!rcu_scheduler_active)

	if (!rcu_preempt_blocked_readers_any())

static unsigned long sync_rcu_preempt_exp_count;
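
/* Are there blocked readers holding up the current expedited grace period? */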
static int rcu_preempted_readers_exp(void)
{
	return rcu_preempt_ctrlblk.exp_tasks != NULL;
}
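
/* Wake up the task waiting for the expedited grace period to finish. */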
static void rcu_report_exp_done(void)
{
	wake_up(&sync_rcu_preempt_exp_wq);
}
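
/*
 * Expedited grace-period machinery (presumably synchronize_rcu_expedited()):
 * snapshot the counter, mark the already-blocked readers via ->exp_tasks,
 * boost them if configured, and wait until none remain.
 */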
	struct rcu_preempt_ctrlblk *rpcp = &rcu_preempt_ctrlblk;

	snap = sync_rcu_preempt_exp_count + 1;

	rpcp->exp_tasks = rpcp->blkd_tasks.next;
	if (rpcp->exp_tasks == &rpcp->blkd_tasks)
		rpcp->exp_tasks = NULL;

	if (!rcu_preempted_readers_exp()) {

		rcu_initiate_boost();

			   !rcu_preempted_readers_exp());

	sync_rcu_preempt_exp_count++;

	return rcu_preempt_ctrlblk.rcb.rcucblist != NULL;
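
/* Stub versions used when CONFIG_TINY_PREEMPT_RCU is not set. */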

#ifdef CONFIG_RCU_TRACE

static void show_tiny_preempt_stats(struct seq_file *m)
{
}

static void rcu_preempt_check_callbacks(void)
{
}

static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp)
{
}

static void rcu_preempt_process_callbacks(void)
{
}

#ifdef CONFIG_RCU_BOOST
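
/*
 * Note that callback-processing work is pending and wake the RCU
 * kthread if it has been spawned.
 */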
	have_rcu_kthread_work = 1;
	if (rcu_kthread_task != NULL)

#ifdef CONFIG_RCU_TRACE
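
/* Is the current task the RCU callback-processing kthread? */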
static bool rcu_is_callbacks_kthread(void)
{
	return rcu_kthread_task == current;
}
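
/*
 * The RCU kthread: repeatedly wait for work, do any required priority
 * boosting, and then invoke callbacks; rcu_boost()'s return value is
 * carried forward as "morework".
 */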
static int rcu_kthread(void *arg)
{

	unsigned long morework;

					 have_rcu_kthread_work != 0);
		morework = rcu_boost();

		work = have_rcu_kthread_work;
		have_rcu_kthread_work = morework;

			rcu_process_callbacks(NULL);

static int __init rcu_spawn_kthreads(void)
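
/* The check below presumably gates callback invocation until the scheduler is fully active. */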
	if (rcu_scheduler_fully_active)

#ifdef CONFIG_RCU_TRACE

static bool rcu_is_callbacks_kthread(void)

static int __init rcu_scheduler_really_started(void)
{
	rcu_scheduler_fully_active = 1;

#ifdef CONFIG_DEBUG_LOCK_ALLOC

	rcu_scheduler_active = 1;

#ifdef CONFIG_RCU_TRACE

#ifdef CONFIG_RCU_BOOST
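
/* Record, for tracing, why rcu_initiate_boost() declined to start boosting. */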
static void rcu_initiate_boost_trace(void)
{
	if (list_empty(&rcu_preempt_ctrlblk.blkd_tasks))
		rcu_preempt_ctrlblk.n_balk_blkd_tasks++;
	else if (rcu_preempt_ctrlblk.gp_tasks == NULL &&
		 rcu_preempt_ctrlblk.exp_tasks == NULL)
		rcu_preempt_ctrlblk.n_balk_exp_gp_tasks++;
	else if (rcu_preempt_ctrlblk.boost_tasks != NULL)
		rcu_preempt_ctrlblk.n_balk_boost_tasks++;
	else if (!ULONG_CMP_GE(jiffies, rcu_preempt_ctrlblk.boost_time))
		rcu_preempt_ctrlblk.n_balk_notyet++;
	else
		rcu_preempt_ctrlblk.n_balk_nos++;
}

static void rcu_trace_sub_qlen(struct rcu_ctrlblk *rcp, int n)
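
/*
 * Body of the seq_file show routine (presumably show_tiny_stats()):
 * dump the per-flavor callback-queue statistics.
 */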
	show_tiny_preempt_stats(m);
	seq_printf(m, "rcu_sched: qlen: %ld\n", rcu_sched_ctrlblk.qlen);
	seq_printf(m, "rcu_bh: qlen: %ld\n", rcu_bh_ctrlblk.qlen);

	.open = show_tiny_stats_open,

static struct dentry *rcudir;
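
/* Set up the debugfs entry through which the statistics above are exported. */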
static int __init rcutiny_trace_init(void)
{

				   NULL, &show_tiny_stats_fops);

static void __exit rcutiny_trace_cleanup(void)