Linux Kernel 3.7.1
arch/x86/include/asm/atomic.h
#ifndef _ASM_X86_ATOMIC_H
#define _ASM_X86_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/processor.h>
#include <asm/alternative.h>
#include <asm/cmpxchg.h>

/*
 * Atomic operations that C can't guarantee us. Useful for
 * resource counting etc..
 */

#define ATOMIC_INIT(i)  { (i) }

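/*
 * Example (illustrative only; "active_users" is a hypothetical name):
 * an atomic counter is declared and initialized statically with
 * ATOMIC_INIT(), or at run time with atomic_set():
 *
 *      static atomic_t active_users = ATOMIC_INIT(0);
 */
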
/* Atomically reads the value of @v. */
static inline int atomic_read(const atomic_t *v)
{
        return (*(volatile int *)&(v)->counter);
}

/* Atomically sets the value of @v to @i. */
static inline void atomic_set(atomic_t *v, int i)
{
        v->counter = i;
}

/* Atomically adds @i to @v. */
static inline void atomic_add(int i, atomic_t *v)
{
        asm volatile(LOCK_PREFIX "addl %1,%0"
                     : "+m" (v->counter)
                     : "ir" (i));
}

/* Atomically subtracts @i from @v. */
static inline void atomic_sub(int i, atomic_t *v)
{
        asm volatile(LOCK_PREFIX "subl %1,%0"
                     : "+m" (v->counter)
                     : "ir" (i));
}

/*
 * Atomically subtracts @i from @v and returns true if the result is
 * zero, or false for all other cases.
 */
static inline int atomic_sub_and_test(int i, atomic_t *v)
{
        unsigned char c;

        asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
                     : "+m" (v->counter), "=qm" (c)
                     : "ir" (i) : "memory");
        return c;
}

/* Atomically increments @v by 1. */
static inline void atomic_inc(atomic_t *v)
{
        asm volatile(LOCK_PREFIX "incl %0"
                     : "+m" (v->counter));
}

/* Atomically decrements @v by 1. */
static inline void atomic_dec(atomic_t *v)
{
        asm volatile(LOCK_PREFIX "decl %0"
                     : "+m" (v->counter));
}

/*
 * Atomically decrements @v by 1 and returns true if the result is 0,
 * or false for all other cases.
 */
static inline int atomic_dec_and_test(atomic_t *v)
{
        unsigned char c;

        asm volatile(LOCK_PREFIX "decl %0; sete %1"
                     : "+m" (v->counter), "=qm" (c)
                     : : "memory");
        return c != 0;
}

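/*
 * Example (illustrative only; "obj" and its "refcount" field are
 * hypothetical): the classic reference-drop pattern frees the object
 * only when the final reference goes away:
 *
 *      if (atomic_dec_and_test(&obj->refcount))
 *              kfree(obj);
 */
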
/*
 * Atomically increments @v by 1 and returns true if the result is
 * zero, or false for all other cases.
 */
static inline int atomic_inc_and_test(atomic_t *v)
{
        unsigned char c;

        asm volatile(LOCK_PREFIX "incl %0; sete %1"
                     : "+m" (v->counter), "=qm" (c)
                     : : "memory");
        return c != 0;
}

/*
 * Atomically adds @i to @v and returns true if the result is negative,
 * or false when the result is greater than or equal to zero.
 */
static inline int atomic_add_negative(int i, atomic_t *v)
{
        unsigned char c;

        asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
                     : "+m" (v->counter), "=qm" (c)
                     : "ir" (i) : "memory");
        return c;
}

/*
 * Atomically adds @i to @v and returns the new value (@i + @v).
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
#ifdef CONFIG_M386
        int __i;
        unsigned long flags;
        if (unlikely(boot_cpu_data.x86 <= 3))
                goto no_xadd;
#endif
        /* Modern 486+ processor */
        return i + xadd(&v->counter, i);

#ifdef CONFIG_M386
no_xadd: /* Legacy 386 processor */
        raw_local_irq_save(flags);
        __i = atomic_read(v);
        atomic_set(v, i + __i);
        raw_local_irq_restore(flags);
        return i + __i;
#endif
}

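/*
 * Example (illustrative only): if v currently holds 5, then
 * atomic_add_return(3, &v) leaves 8 in v and returns 8. The
 * CONFIG_M386 fallback above computes the same result by briefly
 * disabling interrupts instead of using xadd.
 */
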
/*
 * Atomically subtracts @i from @v and returns @v - @i.
 */
static inline int atomic_sub_return(int i, atomic_t *v)
{
        return atomic_add_return(-i, v);
}

#define atomic_inc_return(v) (atomic_add_return(1, v))
#define atomic_dec_return(v) (atomic_sub_return(1, v))

/*
 * If @v equals @old, atomically replace it with @new; in all cases
 * return the value @v held before the call.
 */
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
        return cmpxchg(&v->counter, old, new);
}

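/*
 * Example (illustrative only; "claimed" is a hypothetical atomic_t and
 * first_caller_work() a hypothetical helper): only the caller whose
 * compare-and-swap actually flips 0 to 1 sees the old value 0 returned:
 *
 *      if (atomic_cmpxchg(&claimed, 0, 1) == 0)
 *              first_caller_work();
 */
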
/* Atomically set @v to @new and return the old value. */
static inline int atomic_xchg(atomic_t *v, int new)
{
        return xchg(&v->counter, new);
}

/*
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns the old value of @v.
 */
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
        int c, old;
        c = atomic_read(v);
        for (;;) {
                if (unlikely(c == (u)))
                        break;
                old = atomic_cmpxchg((v), c, c + (a));
                if (likely(old == c))
                        break;
                c = old;
        }
        return c;
}

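/*
 * Example (illustrative only): the generic atomic_add_unless() and
 * atomic_inc_not_zero() helpers in <linux/atomic.h> are built on this
 * routine. A typical use is taking a reference only while the count
 * is still non-zero:
 *
 *      if (__atomic_add_unless(&obj->refcount, 1, 0) != 0)
 *              reference_was_taken();
 *
 * "obj", "refcount" and reference_was_taken() are hypothetical names.
 */
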
/*
 * Atomically adds 1 to the short integer @v and returns the new value.
 */
static inline short int atomic_inc_short(short int *v)
{
        asm(LOCK_PREFIX "addw $1, %0" : "+m" (*v));
        return *v;
}

#ifdef CONFIG_X86_64

/* Atomically ORs the bits of @v2 into *@v1. */
static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
{
        asm(LOCK_PREFIX "orq %1, %0" : "+m" (*v1) : "r" (v2));
}
#endif

/* These are x86-specific, used by some header files */
#define atomic_clear_mask(mask, addr)                           \
        asm volatile(LOCK_PREFIX "andl %0,%1"                   \
                     : : "r" (~(mask)), "m" (*(addr)) : "memory")

#define atomic_set_mask(mask, addr)                             \
        asm volatile(LOCK_PREFIX "orl %0,%1"                    \
                     : : "r" ((unsigned)(mask)), "m" (*(addr))  \
                     : "memory")

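/*
 * Example (illustrative only; "flags" is a hypothetical unsigned int):
 * atomic_set_mask() ORs the mask bits into the word, atomic_clear_mask()
 * ANDs them away:
 *
 *      atomic_set_mask(0x01, &flags);
 *      atomic_clear_mask(0x01, &flags);
 */
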
/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic_dec()     barrier()
#define smp_mb__after_atomic_dec()      barrier()
#define smp_mb__before_atomic_inc()     barrier()
#define smp_mb__after_atomic_inc()      barrier()

#ifdef CONFIG_X86_32
# include <asm/atomic64_32.h>
#else
# include <asm/atomic64_64.h>
#endif

#endif /* _ASM_X86_ATOMIC_H */
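
A minimal usage sketch of the resource-counting pattern mentioned in the header comment. Kernel code normally includes <linux/atomic.h>, which pulls in this architecture header; the struct session type and the session_* helpers below are hypothetical names used only for illustration.

#include <linux/atomic.h>
#include <linux/slab.h>

struct session {
        atomic_t refcount;              /* how many users hold a reference */
        /* payload fields would go here */
};

static struct session *session_create(void)
{
        struct session *s = kmalloc(sizeof(*s), GFP_KERNEL);

        if (s)
                atomic_set(&s->refcount, 1);    /* creator holds one reference */
        return s;
}

static void session_get(struct session *s)
{
        atomic_inc(&s->refcount);               /* take another reference */
}

static void session_put(struct session *s)
{
        /* free only when the last reference is dropped */
        if (atomic_dec_and_test(&s->refcount))
                kfree(s);
}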