#ifndef LINUX_HARDIRQ_H
#define LINUX_HARDIRQ_H

#include <linux/preempt.h>
#include <linux/lockdep.h>
#include <linux/ftrace_irq.h>
#include <asm/hardirq.h>
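/*
 * The hardirq and softirq counters are folded into the per-task
 * preemption counter.  The bitmask has the following meaning:
 *
 *  - bits 0-7   are the preemption count (max preemption depth: 256)
 *  - bits 8-15  are the softirq count    (max nesting depth: 256)
 *  - bits 16-25 are the hardirq count    (max nesting depth: 1024)
 *  - bit  26    is the NMI flag          (NMI_MASK)
 *  - bit  27    is PREEMPT_ACTIVE, unless the architecture provides
 *               its own definition
 *
 * An architecture may define a smaller HARDIRQ_BITS, but never more
 * than MAX_HARDIRQ_BITS.
 */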
#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8
#define NMI_BITS	1

#define MAX_HARDIRQ_BITS 10
#ifndef HARDIRQ_BITS
# define HARDIRQ_BITS	MAX_HARDIRQ_BITS
#endif
#if HARDIRQ_BITS > MAX_HARDIRQ_BITS
#error HARDIRQ_BITS too high!
#endif
#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define NMI_SHIFT	(HARDIRQ_SHIFT + HARDIRQ_BITS)

#define __IRQ_MASK(x)	((1UL << (x))-1)
#define PREEMPT_MASK	(__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK	(__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK	(__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
#define NMI_MASK	(__IRQ_MASK(NMI_BITS) << NMI_SHIFT)

#define PREEMPT_OFFSET	(1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET	(1UL << NMI_SHIFT)

#define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)
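/*
 * local_bh_disable()/local_bh_enable() step the softirq count in units
 * of SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET), while actually running
 * a softirq adds only SOFTIRQ_OFFSET.  This is what lets
 * in_serving_softirq() below tell "softirqs merely disabled" apart from
 * "currently executing a softirq handler".
 */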
#ifndef PREEMPT_ACTIVE
#define PREEMPT_ACTIVE_BITS	1
#define PREEMPT_ACTIVE_SHIFT	(NMI_SHIFT + NMI_BITS)
#define PREEMPT_ACTIVE	(__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT)
#endif
#if PREEMPT_ACTIVE < (1 << (NMI_SHIFT + NMI_BITS))
#error PREEMPT_ACTIVE is too low!
#endif
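/*
 * Extract the individual nesting counters from the combined
 * preemption count.
 */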
#define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
#define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
#define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
				 | NMI_MASK))
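/*
 * Context tests built on the counters above:
 *
 *  in_irq()             - we are in a hardirq (interrupt handler)
 *  in_softirq()         - we are in softirq context, or have softirqs
 *                         disabled via local_bh_disable()
 *  in_serving_softirq() - we are actually executing a softirq handler
 *  in_interrupt()       - any of hardirq, softirq or NMI context
 *
 * Code that can be called from either process or interrupt context
 * typically uses these to pick a safe allocation mode, e.g.:
 *
 *	gfp_t gfp = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
 */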
#define in_irq()		(hardirq_count())
#define in_softirq()		(softirq_count())
#define in_interrupt()		(irq_count())
#define in_serving_softirq()	(softirq_count() & SOFTIRQ_OFFSET)
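/*
 * Are we in NMI context?
 */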
#define in_nmi()	(preempt_count() & NMI_MASK)
#if defined(CONFIG_PREEMPT_COUNT)
# define PREEMPT_CHECK_OFFSET 1
#else
# define PREEMPT_CHECK_OFFSET 0
#endif
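/*
 * Are we running in atomic context?  WARNING: this macro cannot always
 * detect atomic context; in particular, it cannot know about held
 * spinlocks in non-preemptible kernels.  Thus it should not be used in
 * the general case to determine whether sleeping is possible.  Do not
 * use in_atomic() in driver code.
 */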
#define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != 0)
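/*
 * Check whether we were atomic before we did preempt_disable():
 * the extra PREEMPT_CHECK_OFFSET accounts for that one explicit
 * preempt_disable() level (used by the scheduler).
 */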
#define in_atomic_preempt_off() \
		((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET)
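/*
 * preemptible() is true only when the preemption count is zero and
 * interrupts are enabled; without CONFIG_PREEMPT_COUNT the counter is
 * not maintained, so it is hardwired to 0.  IRQ_EXIT_OFFSET is what
 * irq_exit() subtracts, so that on preemptible kernels preemption
 * stays disabled while any pending softirqs are processed.
 */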
#ifdef CONFIG_PREEMPT_COUNT
# define preemptible()	(preempt_count() == 0 && !irqs_disabled())
# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
#else
# define preemptible()	0
# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
#endif
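/*
 * synchronize_irq() waits for all in-flight handlers of the given
 * interrupt line to complete.  Without SMP or generic hardirq support
 * there is nothing to wait for, so it reduces to a compiler barrier.
 */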
#if defined(CONFIG_SMP) || defined(CONFIG_GENERIC_HARDIRQS)
extern void synchronize_irq(unsigned int irq);
#else
# define synchronize_irq(irq)	barrier()
#endif
#if !defined(CONFIG_VIRT_CPU_ACCOUNTING) && !defined(CONFIG_IRQ_TIME_ACCOUNTING)
static inline void vtime_account(struct task_struct *tsk)
{
}
#else
extern void vtime_account(struct task_struct *tsk);
#endif
#if defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU)
static inline void rcu_nmi_enter(void)
{
}

static inline void rcu_nmi_exit(void)
{
}
#else
extern void rcu_nmi_enter(void);
extern void rcu_nmi_exit(void);
#endif
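/*
 * __irq_enter()/__irq_exit() do the raw hardirq bookkeeping: adjust the
 * hardirq count, account the interrupted task's time and notify the
 * irq-tracing machinery.  Normal interrupt entry/exit paths should use
 * irq_enter()/irq_exit(), which additionally handle NO_HZ jiffies
 * updates and pending softirqs.
 */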
#define __irq_enter()					\
	do {						\
		vtime_account(current);			\
		add_preempt_count(HARDIRQ_OFFSET);	\
		trace_hardirq_enter();			\
	} while (0)

/* Enter irq context (on NO_HZ, update jiffies): */
extern void irq_enter(void);
#define __irq_exit()					\
	do {						\
		trace_hardirq_exit();			\
		vtime_account(current);			\
		sub_preempt_count(HARDIRQ_OFFSET);	\
	} while (0)

/* Exit irq context and process softirqs if needed: */
extern void irq_exit(void);
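/*
 * nmi_enter()/nmi_exit() mark NMI context.  Both the NMI and hardirq
 * counters are bumped, so in_nmi(), in_irq() and in_interrupt() all
 * report true while the NMI handler runs; lockdep is switched off,
 * since an NMI can fire in the middle of lock operations.
 */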
#define nmi_enter()						\
	do {							\
		ftrace_nmi_enter();				\
		BUG_ON(in_nmi());				\
		add_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET);	\
		lockdep_off();					\
		rcu_nmi_enter();				\
		trace_hardirq_enter();				\
	} while (0)
#define nmi_exit()						\
	do {							\
		trace_hardirq_exit();				\
		rcu_nmi_exit();					\
		lockdep_on();					\
		BUG_ON(!in_nmi());				\
		sub_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET);	\
		ftrace_nmi_exit();				\
	} while (0)

#endif /* LINUX_HARDIRQ_H */