Linux Kernel  3.7.1
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
Macros | Functions | Variables
rcutree.c File Reference
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/nmi.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/export.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/time.h>
#include <linux/kernel_stat.h>
#include <linux/wait.h>
#include <linux/kthread.h>
#include <linux/prefetch.h>
#include <linux/delay.h>
#include <linux/stop_machine.h>
#include <linux/random.h>
#include "rcutree.h"
#include <trace/events/rcu.h>
#include "rcu.h"
#include "rcutree_plugin.h"

Go to the source code of this file.

Macros

#define RCU_STATE_INITIALIZER(sname, cr)
 

Functions

 DEFINE_PER_CPU (struct rcu_data, rcu_sched_data)
 
 DEFINE_PER_CPU (struct rcu_data, rcu_bh_data)
 
 LIST_HEAD (rcu_struct_flavors)
 
 module_param (rcu_fanout_leaf, int, 0444)
 
 EXPORT_SYMBOL_GPL (rcu_scheduler_active)
 
void rcu_sched_qs (int cpu)
 
void rcu_bh_qs (int cpu)
 
void rcu_note_context_switch (int cpu)
 
 EXPORT_SYMBOL_GPL (rcu_note_context_switch)
 
 DEFINE_PER_CPU (struct rcu_dynticks, rcu_dynticks)
 
 module_param (blimit, long, 0444)
 
 module_param (qhimark, long, 0444)
 
 module_param (qlowmark, long, 0444)
 
 module_param (rcu_cpu_stall_suppress, int, 0644)
 
 module_param (rcu_cpu_stall_timeout, int, 0644)
 
 module_param (jiffies_till_first_fqs, ulong, 0644)
 
 module_param (jiffies_till_next_fqs, ulong, 0644)
 
long rcu_batches_completed_sched (void)
 
 EXPORT_SYMBOL_GPL (rcu_batches_completed_sched)
 
long rcu_batches_completed_bh (void)
 
 EXPORT_SYMBOL_GPL (rcu_batches_completed_bh)
 
void rcu_bh_force_quiescent_state (void)
 
 EXPORT_SYMBOL_GPL (rcu_bh_force_quiescent_state)
 
void rcutorture_record_test_transition (void)
 
 EXPORT_SYMBOL_GPL (rcutorture_record_test_transition)
 
void rcutorture_record_progress (unsigned long vernum)
 
 EXPORT_SYMBOL_GPL (rcutorture_record_progress)
 
void rcu_sched_force_quiescent_state (void)
 
 EXPORT_SYMBOL_GPL (rcu_sched_force_quiescent_state)
 
void rcu_idle_enter (void)
 
 EXPORT_SYMBOL_GPL (rcu_idle_enter)
 
void rcu_irq_exit (void)
 
void rcu_idle_exit (void)
 
 EXPORT_SYMBOL_GPL (rcu_idle_exit)
 
void rcu_irq_enter (void)
 
void rcu_nmi_enter (void)
 
void rcu_nmi_exit (void)
 
int rcu_is_cpu_idle (void)
 
 EXPORT_SYMBOL (rcu_is_cpu_idle)
 
int rcu_is_cpu_rrupt_from_idle (void)
 
void rcu_cpu_stall_reset (void)
 
int rcu_gp_fqs (struct rcu_state *rsp, int fqs_state_in)
 
void rcu_check_callbacks (int cpu, int user)
 
void call_rcu_sched (struct rcu_head *head, void(*func)(struct rcu_head *rcu))
 
 EXPORT_SYMBOL_GPL (call_rcu_sched)
 
void call_rcu_bh (struct rcu_head *head, void(*func)(struct rcu_head *rcu))
 
 EXPORT_SYMBOL_GPL (call_rcu_bh)
 
void synchronize_sched (void)
 
 EXPORT_SYMBOL_GPL (synchronize_sched)
 
void synchronize_rcu_bh (void)
 
 EXPORT_SYMBOL_GPL (synchronize_rcu_bh)
 
void synchronize_sched_expedited (void)
 
 EXPORT_SYMBOL_GPL (synchronize_sched_expedited)
 
void rcu_barrier_bh (void)
 
 EXPORT_SYMBOL_GPL (rcu_barrier_bh)
 
void rcu_barrier_sched (void)
 
 EXPORT_SYMBOL_GPL (rcu_barrier_sched)
 
 early_initcall (rcu_spawn_gp_kthread)
 
void rcu_scheduler_starting (void)
 
void __init rcu_init (void)
 

Variables

struct rcu_state rcu_sched_state
 
struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh, call_rcu_bh)
 
int rcu_num_lvls __read_mostly = RCU_NUM_LVLS
 
unsigned long rcutorture_testseq
 
unsigned long rcutorture_vernum
 

Macro Definition Documentation

#define RCU_STATE_INITIALIZER (   sname,
  cr 
)
Value:
{ \
.level = { &sname##_state.node[0] }, \
.call = cr, \
.fqs_state = RCU_GP_IDLE, \
.gpnum = -300, \
.completed = -300, \
.onofflock = __RAW_SPIN_LOCK_UNLOCKED(&sname##_state.onofflock), \
.orphan_nxttail = &sname##_state.orphan_nxtlist, \
.orphan_donetail = &sname##_state.orphan_donelist, \
.barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
.onoff_mutex = __MUTEX_INITIALIZER(sname##_state.onoff_mutex), \
.name = #sname, \
}

Definition at line 67 of file rcutree.c.

Function Documentation

void call_rcu_bh ( struct rcu_head *  head,
void(*)(struct rcu_head *rcu)  func 
)

Definition at line 2171 of file rcutree.c.

void call_rcu_sched ( struct rcu_head *  head,
void(*)(struct rcu_head *rcu)  func 
)

call_rcu_sched() - Queue an RCU callback for invocation after a sched grace period. @head: structure to be used for queueing the RCU updates. @func: actual callback function to be invoked after the grace period

The callback function will be invoked some time after a full grace period elapses, in other words after all currently executing RCU read-side critical sections have completed. call_rcu_sched() assumes that the read-side critical sections end on enabling of preemption or on voluntary preemption. RCU read-side critical sections are delimited by:

  • rcu_read_lock_sched() and rcu_read_unlock_sched(), OR anything that disables preemption. These may be nested.

Definition at line 2162 of file rcutree.c.

DEFINE_PER_CPU ( struct rcu_data  ,
rcu_sched_data   
)
DEFINE_PER_CPU ( struct rcu_data  ,
rcu_bh_data   
)
DEFINE_PER_CPU ( struct rcu_dynticks  ,
rcu_dynticks   
)
early_initcall ( rcu_spawn_gp_kthread  )
EXPORT_SYMBOL ( rcu_is_cpu_idle  )
EXPORT_SYMBOL_GPL ( rcu_scheduler_active  )
EXPORT_SYMBOL_GPL ( rcu_note_context_switch  )
EXPORT_SYMBOL_GPL ( rcu_batches_completed_sched  )
EXPORT_SYMBOL_GPL ( rcu_batches_completed_bh  )
EXPORT_SYMBOL_GPL ( rcu_bh_force_quiescent_state  )
EXPORT_SYMBOL_GPL ( rcutorture_record_test_transition  )
EXPORT_SYMBOL_GPL ( rcutorture_record_progress  )
EXPORT_SYMBOL_GPL ( rcu_sched_force_quiescent_state  )
EXPORT_SYMBOL_GPL ( rcu_idle_enter  )
EXPORT_SYMBOL_GPL ( rcu_idle_exit  )
EXPORT_SYMBOL_GPL ( call_rcu_sched  )
EXPORT_SYMBOL_GPL ( call_rcu_bh  )
EXPORT_SYMBOL_GPL ( synchronize_sched  )
EXPORT_SYMBOL_GPL ( synchronize_rcu_bh  )
EXPORT_SYMBOL_GPL ( synchronize_sched_expedited  )
EXPORT_SYMBOL_GPL ( rcu_barrier_bh  )
EXPORT_SYMBOL_GPL ( rcu_barrier_sched  )
LIST_HEAD ( rcu_struct_flavors  )
module_param ( rcu_fanout_leaf  ,
int  ,
0444   
)
module_param ( blimit  ,
long  ,
0444   
)
module_param ( qhimark  ,
long  ,
0444   
)
module_param ( qlowmark  ,
long  ,
0444   
)
module_param ( rcu_cpu_stall_suppress  ,
int  ,
0644   
)
module_param ( rcu_cpu_stall_timeout  ,
int  ,
0644   
)
module_param ( jiffies_till_first_fqs  ,
ulong  ,
0644   
)
module_param ( jiffies_till_next_fqs  ,
ulong  ,
0644   
)
void rcu_barrier_bh ( void  )

rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.

Definition at line 2599 of file rcutree.c.

void rcu_barrier_sched ( void  )

rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.

Definition at line 2608 of file rcutree.c.

long rcu_batches_completed_bh ( void  )

Definition at line 251 of file rcutree.c.

long rcu_batches_completed_sched ( void  )

Definition at line 242 of file rcutree.c.

void rcu_bh_force_quiescent_state ( void  )

Definition at line 260 of file rcutree.c.

void rcu_bh_qs ( int  cpu)

Definition at line 184 of file rcutree.c.

void rcu_check_callbacks ( int  cpu,
int  user 
)

Definition at line 1865 of file rcutree.c.

void rcu_cpu_stall_reset ( void  )

rcu_cpu_stall_reset - prevent further stall warnings in current grace period

Set the stall-warning timeout way off into the future, thus preventing any RCU CPU stall-warning messages from appearing in the current set of RCU grace periods.

The caller must disable hard irqs.

Definition at line 1008 of file rcutree.c.

int rcu_gp_fqs ( struct rcu_state *  rsp,
int  fqs_state_in 
)

Definition at line 1245 of file rcutree.c.

void rcu_idle_enter ( void  )

rcu_idle_enter - inform RCU that current CPU is entering idle

Enter idle mode, in other words, -leave- the mode in which RCU read-side critical sections can occur. (Though RCU read-side critical sections can occur in irq handlers in idle, a possibility handled by irq_enter() and irq_exit().)

We crowbar the ->dynticks_nesting field to zero to allow for the possibility of usermode upcalls having messed up our count of interrupt nesting level during the prior busy period.

Definition at line 398 of file rcutree.c.

void rcu_idle_exit ( void  )

rcu_idle_exit - inform RCU that current CPU is leaving idle

Exit idle mode, in other words, -enter- the mode in which RCU read-side critical sections can occur.

We crowbar the ->dynticks_nesting field to DYNTICK_TASK_NEST to allow for the possibility of usermode upcalls messing up our count of interrupt nesting level during the busy period that is just now starting.

Definition at line 559 of file rcutree.c.

void __init rcu_init ( void  )

Definition at line 2962 of file rcutree.c.

void rcu_irq_enter ( void  )

rcu_irq_enter - inform RCU that current CPU is entering irq away from idle

Enter an interrupt handler, which might possibly result in exiting idle mode, in other words, entering the mode in which read-side critical sections can occur.

Note that the Linux kernel is fully capable of entering an interrupt handler that it never exits, for example when doing upcalls to user mode! This code assumes that the idle loop never does upcalls to user mode. If your architecture does do upcalls from the idle loop (or does anything else that results in unbalanced calls to the irq_enter() and irq_exit() functions), RCU will give you what you deserve, good and hard. But very infrequently and irreproducibly.

Use things like work queues to work around this limitation.

You have been warned.

Definition at line 643 of file rcutree.c.

void rcu_irq_exit ( void  )

rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle

Exit from an interrupt handler, which might possibly result in entering idle mode, in other words, leaving the mode in which read-side critical sections can occur.

This code assumes that the idle loop never does anything that might result in unbalanced calls to irq_enter() and irq_exit(). If your architecture violates this assumption, RCU will give you what you deserve, good and hard. But very infrequently and irreproducibly.

Use things like work queues to work around this limitation.

You have been warned.

Definition at line 482 of file rcutree.c.

int rcu_is_cpu_idle ( void  )

rcu_is_cpu_idle - see if RCU thinks that the current CPU is idle

If the current CPU is in its idle loop and is neither in an interrupt or NMI handler, return true.

Definition at line 710 of file rcutree.c.

int rcu_is_cpu_rrupt_from_idle ( void  )

rcu_is_cpu_rrupt_from_idle - see if idle or immediately interrupted from idle

If the current CPU is idle or running at a first-level (not nested) interrupt from idle, return true. The caller must have at least disabled preemption.

Definition at line 786 of file rcutree.c.

void rcu_nmi_enter ( void  )

rcu_nmi_enter - inform RCU of entry to NMI context

If the CPU was idle with dynamic ticks active, and there is no irq handler running, this updates rdtp->dynticks_nmi to let the RCU grace-period handling know that the CPU is active.

Definition at line 668 of file rcutree.c.

void rcu_nmi_exit ( void  )

rcu_nmi_exit - inform RCU of exit from NMI context

If the CPU was idle with dynamic ticks active, and there is no irq handler running, this updates rdtp->dynticks_nmi to let the RCU grace-period handling know that the CPU is no longer active.

Definition at line 690 of file rcutree.c.

void rcu_note_context_switch ( int  cpu)

Definition at line 198 of file rcutree.c.

void rcu_sched_force_quiescent_state ( void  )

Definition at line 294 of file rcutree.c.

void rcu_sched_qs ( int  cpu)

Definition at line 175 of file rcutree.c.

void rcu_scheduler_starting ( void  )

Definition at line 2789 of file rcutree.c.

void rcutorture_record_progress ( unsigned long  vernum)

Definition at line 285 of file rcutree.c.

void rcutorture_record_test_transition ( void  )

Definition at line 273 of file rcutree.c.

void synchronize_rcu_bh ( void  )

synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.

Control will return to the caller some time after a full rcu_bh grace period has elapsed, in other words after all currently executing rcu_bh read-side critical sections have completed. RCU read-side critical sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(), and may be nested.

Definition at line 2241 of file rcutree.c.

void synchronize_sched ( void  )

synchronize_sched - wait until an rcu-sched grace period has elapsed.

Control will return to the caller some time after a full rcu-sched grace period has elapsed, in other words after all currently executing rcu-sched read-side critical sections have completed. These read-side critical sections are delimited by rcu_read_lock_sched() and rcu_read_unlock_sched(), and may be nested. Note that preempt_disable(), local_irq_disable(), and so on may be used in place of rcu_read_lock_sched().

This means that all preempt_disable code sequences, including NMI and hardware-interrupt handlers, in progress on entry will have completed before this primitive returns. However, this does not guarantee that softirq handlers will have completed, since in some kernels, these handlers can run in process context, and can block.

This primitive provides the guarantees made by the (now removed) synchronize_kernel() API. In contrast, synchronize_rcu() only guarantees that rcu_read_lock() sections will have completed. In "classic RCU", these two guarantees happen to be one and the same, but can differ in realtime RCU implementations.

Definition at line 2220 of file rcutree.c.

void synchronize_sched_expedited ( void  )

synchronize_sched_expedited - Brute-force RCU-sched grace period

Wait for an RCU-sched grace period to elapse, but use a "big hammer" approach to force the grace period to end quickly. This consumes significant time on all CPUs and is unfriendly to real-time workloads, so is thus not recommended for any sort of common-case code. In fact, if you are using synchronize_sched_expedited() in a loop, please restructure your code to batch your updates, and then use a single synchronize_sched() instead.

Note that it is illegal to call this function while holding any lock that is acquired by a CPU-hotplug notifier. And yes, it is also illegal to call this function from a CPU-hotplug notifier. Failing to observe these restriction will result in deadlock.

This implementation can be thought of as an application of ticket locking to RCU, with sync_sched_expedited_started and sync_sched_expedited_done taking on the roles of the halves of the ticket-lock word. Each task atomically increments sync_sched_expedited_started upon entry, snapshotting the old value, then attempts to stop all the CPUs. If this succeeds, then each CPU will have executed a context switch, resulting in an RCU-sched grace period. We are then done, so we use atomic_cmpxchg() to update sync_sched_expedited_done to match our snapshot – but only if someone else has not already advanced past our snapshot.

On the other hand, if try_stop_cpus() fails, we check the value of sync_sched_expedited_done. If it has advanced past our initial snapshot, then someone else must have forced a grace period some time after we took our snapshot. In this case, our work is done for us, and we can simply return. Otherwise, we try again, but keep our initial snapshot for purposes of checking for someone doing our work for us.

If we fail too many times in a row, we fall back to synchronize_sched().

Definition at line 2310 of file rcutree.c.

Variable Documentation

int rcu_cpu_stall_timeout __read_mostly

Definition at line 94 of file rcutree.c.

int rcu_num_lvls __read_mostly = RCU_NUM_LVLS

Definition at line 85 of file rcutree.c.

struct rcu_state rcu_sched_state
Initial value:
= RCU_STATE_INITIALIZER(rcu_sched, call_rcu_sched)

Definition at line 81 of file rcutree.c.

unsigned long rcutorture_testseq

Definition at line 156 of file rcutree.c.

unsigned long rcutorture_vernum

Definition at line 157 of file rcutree.c.