atomic.h
#ifndef _ASM_GENERIC_BITOPS_ATOMIC_H_
#define _ASM_GENERIC_BITOPS_ATOMIC_H_

#include <asm/types.h>
#include <linux/irqflags.h>

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
# define ATOMIC_HASH_SIZE 4
# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
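
/*
 * Example: with L1_CACHE_BYTES == 32 and ATOMIC_HASH_SIZE == 4 (an
 * assumed configuration), ATOMIC_HASH() sends addresses 0x1000..0x101f
 * to __atomic_hash[0], 0x1020..0x103f to __atomic_hash[1], and wraps
 * back to slot 0 every 4 cachelines (128 bytes).  All words in one
 * cacheline therefore contend on the same lock.
 */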

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	arch_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);		\
	arch_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)

#else
/* On non-SMP builds there is no other CPU to race with, so disabling
 * local interrupts around the read-modify-write is enough to make the
 * operations below atomic. */
# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif

/*
 * NMI events can occur at any time, including when interrupts have been
 * disabled by *_irqsave().  So you can get NMI events occurring while a
 * *_bit function is holding a spin lock.  If the NMI handler also wants
 * to do bit manipulation (and they do) then you can get a deadlock
 * between the original caller of *_bit() and the NMI handler.
 *
 * by Keith Owens
 */
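
/*
 * Illustrative scenario: a CPU takes the hashed spinlock inside
 * set_bit(), an NMI arrives on that same CPU (NMIs are not blocked by
 * local_irq_save()), and the NMI handler calls test_and_set_bit() on an
 * address that hashes to the same lock.  The handler then spins on a
 * lock its own CPU already holds and never returns.
 */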

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 */
static inline void set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	*p |= mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: the bit to clear
 * @addr: the address to start counting from
 */
static inline void clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	*p &= ~mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 */
static inline void change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	*p ^= mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: the bit to set
 * @addr: the address to count from
 */
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old | mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: the bit to clear
 * @addr: the address to count from
 */
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old & ~mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}

/**
 * test_and_change_bit - Toggle a bit and return its old value
 * @nr: the bit to change
 * @addr: the address to count from
 */
static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old ^ mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}

#endif /* _ASM_GENERIC_BITOPS_ATOMIC_H */
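
For context, here is a minimal usage sketch of the interface this header implements. It is not from the kernel tree: the MYDEV_BUSY bit, the mydev_flags word, and the mydev_* functions are hypothetical names introduced only for illustration. test_and_set_bit() atomically claims a "busy" flag and reports whether it was already set, and clear_bit() releases it.

/* Minimal usage sketch; MYDEV_BUSY and mydev_flags are hypothetical. */
#include <linux/bitops.h>
#include <linux/errno.h>

#define MYDEV_BUSY	0		/* bit number inside mydev_flags */

static unsigned long mydev_flags;	/* bit 0 = device busy */

static int mydev_try_claim(void)
{
	/* Atomically set the busy bit; a non-zero return means it was
	 * already set, i.e. somebody else owns the device. */
	if (test_and_set_bit(MYDEV_BUSY, &mydev_flags))
		return -EBUSY;
	return 0;
}

static void mydev_release(void)
{
	/* Atomically clear the busy bit so the next caller can claim it. */
	clear_bit(MYDEV_BUSY, &mydev_flags);
}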