#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>

#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)  { (i) }

#define atomic_read(v)  (*(volatile int *)&(v)->counter)
#define atomic_set(v,i) (((v)->counter) = (i))

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 and later: implement the atomic ops with ldrex/strex
 * exclusive-monitor loops, which are safe on both UP and SMP.
 * Each loop retries until the store-exclusive succeeds.
 */
static inline void atomic_add(int i, atomic_t *v)
{
        unsigned long tmp;
        int result;

        __asm__ __volatile__("@ atomic_add\n"
"1:     ldrex   %0, [%3]\n"
"       add     %0, %0, %4\n"
"       strex   %1, %0, [%3]\n"     /* %1 == 0 iff the store succeeded */
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "Ir" (i)
        : "cc");
}
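/*
 * Usage sketch (not part of this header): a simple statistics counter
 * built on atomic_t; 'nr_events' and 'batch' are hypothetical names.
 *
 *      static atomic_t nr_events = ATOMIC_INIT(0);
 *
 *      atomic_add(batch, &nr_events);     // no return value, no barrier
 *      pr_info("events: %d\n", atomic_read(&nr_events));
 */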
static inline int atomic_add_return(int i, atomic_t *v)
{
        unsigned long tmp;
        int result;

        smp_mb();
        __asm__ __volatile__("@ atomic_add_return\n"
"1:     ldrex   %0, [%3]\n"
"       add     %0, %0, %4\n"
"       strex   %1, %0, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "Ir" (i)
        : "cc");
        smp_mb();

        return result;
}
static inline void atomic_sub(int i, atomic_t *v)
{
        unsigned long tmp;
        int result;

        __asm__ __volatile__("@ atomic_sub\n"
"1:     ldrex   %0, [%3]\n"
"       sub     %0, %0, %4\n"
"       strex   %1, %0, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "Ir" (i)
        : "cc");
}
static inline int atomic_sub_return(int i, atomic_t *v)
{
        unsigned long tmp;
        int result;

        smp_mb();
        __asm__ __volatile__("@ atomic_sub_return\n"
"1:     ldrex   %0, [%3]\n"
"       sub     %0, %0, %4\n"
"       strex   %1, %0, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "Ir" (i)
        : "cc");
        smp_mb();

        return result;
}
static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
        unsigned long oldval, res;

        smp_mb();

        do {
                __asm__ __volatile__("@ atomic_cmpxchg\n"
                "ldrex  %1, [%3]\n"
                "mov    %0, #0\n"
                "teq    %1, %4\n"          /* compare with 'old' */
                "strexeq %0, %5, [%3]\n"   /* store 'new' only if equal */
                : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
                : "r" (&ptr->counter), "Ir" (old), "r" (new)
                : "cc");
        } while (res);  /* retry until the exclusive store succeeds */

        smp_mb();

        return oldval;
}
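/*
 * Sketch: a conditional add layered on atomic_cmpxchg(), mirroring the
 * generic __atomic_add_unless() pattern; 'my_add_unless' is a
 * hypothetical name.  The loop simply retries whenever another CPU
 * changes the counter between the read and the cmpxchg.
 *
 *      static inline int my_add_unless(atomic_t *v, int a, int u)
 *      {
 *              int c, old;
 *
 *              c = atomic_read(v);
 *              while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
 *                      c = old;
 *              return c;       // old value; caller checks c != u
 *      }
 */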
static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
        unsigned long tmp, tmp2;

        __asm__ __volatile__("@ atomic_clear_mask\n"
"1:     ldrex   %0, [%3]\n"
"       bic     %0, %0, %4\n"   /* clear the mask bits */
"       strex   %1, %0, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
        : "r" (addr), "Ir" (mask)
        : "cc");
}
#else /* __LINUX_ARM_ARCH__ < 6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

/*
 * Pre-ARMv6 CPUs are UP-only: atomic_add_return()/atomic_sub_return()
 * are implemented by disabling interrupts around a plain
 * read-modify-write, and the void variants just discard the result.
 */
#define atomic_add(i, v)        (void) atomic_add_return(i, v)
#define atomic_sub(i, v)        (void) atomic_sub_return(i, v)

#endif /* __LINUX_ARM_ARCH__ */
#define atomic_xchg(v, new)     (xchg(&((v)->counter), new))

#define atomic_inc(v)           atomic_add(1, v)
#define atomic_dec(v)           atomic_sub(1, v)

#define atomic_inc_and_test(v)  (atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)  (atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)    (atomic_add_return(1, v))
#define atomic_dec_return(v)    (atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)

#define smp_mb__before_atomic_dec()     smp_mb()
#define smp_mb__after_atomic_dec()      smp_mb()
#define smp_mb__before_atomic_inc()     smp_mb()
#define smp_mb__after_atomic_inc()      smp_mb()
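/*
 * Sketch: the smp_mb__*_atomic_*() macros order ordinary memory accesses
 * against an adjacent atomic_inc()/atomic_dec(), which by themselves
 * imply no barrier.  A hypothetical teardown path:
 *
 *      obj->dead = 1;                  // must be visible before the drop
 *      smp_mb__before_atomic_dec();
 *      atomic_dec(&obj->refcnt);
 */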
#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
        u64 __aligned(8) counter;       /* ldrexd/strexd need 8-byte alignment */
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }
static inline u64 atomic64_read(const atomic64_t *v)
{
        u64 result;

        __asm__ __volatile__("@ atomic64_read\n"
"       ldrexd  %0, %H0, [%1]"
        : "=&r" (result)
        : "r" (&v->counter), "Qo" (v->counter)
        );

        return result;
}
static inline void atomic64_set(atomic64_t *v, u64 i)
{
        u64 tmp;

        __asm__ __volatile__("@ atomic64_set\n"
"1:     ldrexd  %0, %H0, [%2]\n"        /* dummy load to claim the monitor */
"       strexd  %0, %3, %H3, [%2]\n"
"       teq     %0, #0\n"
"       bne     1b"
        : "=&r" (tmp), "=Qo" (v->counter)
        : "r" (&v->counter), "r" (i)
        : "cc");
}
static inline void atomic64_add(u64 i, atomic64_t *v)
{
        u64 result;
        unsigned long tmp;

        __asm__ __volatile__("@ atomic64_add\n"
"1:     ldrexd  %0, %H0, [%3]\n"
"       adds    %0, %0, %4\n"           /* add low words, set carry */
"       adc     %H0, %H0, %H4\n"        /* add high words with carry */
"       strexd  %1, %0, %H0, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "r" (i)
        : "cc");
}
static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
{
        u64 result;
        unsigned long tmp;

        smp_mb();
        __asm__ __volatile__("@ atomic64_add_return\n"
"1:     ldrexd  %0, %H0, [%3]\n"
"       adds    %0, %0, %4\n"
"       adc     %H0, %H0, %H4\n"
"       strexd  %1, %0, %H0, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "r" (i)
        : "cc");
        smp_mb();

        return result;
}
static inline void atomic64_sub(u64 i, atomic64_t *v)
{
        u64 result;
        unsigned long tmp;

        __asm__ __volatile__("@ atomic64_sub\n"
"1:     ldrexd  %0, %H0, [%3]\n"
"       subs    %0, %0, %4\n"           /* subtract low words, set borrow */
"       sbc     %H0, %H0, %H4\n"        /* subtract high words with borrow */
"       strexd  %1, %0, %H0, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "r" (i)
        : "cc");
}
static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
{
        u64 result;
        unsigned long tmp;

        smp_mb();
        __asm__ __volatile__("@ atomic64_sub_return\n"
"1:     ldrexd  %0, %H0, [%3]\n"
"       subs    %0, %0, %4\n"
"       sbc     %H0, %H0, %H4\n"
"       strexd  %1, %0, %H0, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "r" (i)
        : "cc");
        smp_mb();

        return result;
}
static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
{
        u64 oldval;
        unsigned long res;

        smp_mb();

        do {
                __asm__ __volatile__("@ atomic64_cmpxchg\n"
                "ldrexd         %1, %H1, [%3]\n"
                "mov            %0, #0\n"
                "teq            %1, %4\n"       /* compare low word... */
                "teqeq          %H1, %H4\n"     /* ...then high word */
                "strexdeq       %0, %5, %H5, [%3]"
                : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
                : "r" (&ptr->counter), "r" (old), "r" (new)
                : "cc");
        } while (res);

        smp_mb();

        return oldval;
}
static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
{
        u64 result;
        unsigned long tmp;

        smp_mb();

        __asm__ __volatile__("@ atomic64_xchg\n"
"1:     ldrexd  %0, %H0, [%3]\n"
"       strexd  %1, %4, %H4, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
        : "r" (&ptr->counter), "r" (new)
        : "cc");

        smp_mb();

        return result;
}
static inline u64 atomic64_dec_if_positive(atomic64_t *v)
{
        u64 result;
        unsigned long tmp;

        smp_mb();

        __asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1:     ldrexd  %0, %H0, [%3]\n"
"       subs    %0, %0, #1\n"
"       sbc     %H0, %H0, #0\n"
"       teq     %H0, #0\n"
"       bmi     2f\n"                   /* went negative: don't store */
"       strexd  %1, %0, %H0, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b\n"
"2:"
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter)
        : "cc");

        smp_mb();

        return result;
}
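/*
 * Sketch: atomic64_dec_if_positive() returns the decremented value but
 * never stores a negative result, so it can serve as a hypothetical
 * permit counter:
 *
 *      if (atomic64_dec_if_positive(&permits) < 0)
 *              return -EBUSY;          // no permit was available
 */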
static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
{
        u64 val;
        unsigned long tmp;
        int ret = 1;

        smp_mb();

        __asm__ __volatile__("@ atomic64_add_unless\n"
"1:     ldrexd  %0, %H0, [%4]\n"
"       teq     %0, %5\n"
"       teqeq   %H0, %H5\n"             /* counter == u? */
"       moveq   %1, #0\n"
"       beq     2f\n"                   /* yes: fail without storing */
"       adds    %0, %0, %6\n"
"       adc     %H0, %H0, %H6\n"
"       strexd  %2, %0, %H0, [%4]\n"
"       teq     %2, #0\n"
"       bne     1b\n"
"2:"
        : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "r" (u), "r" (a)
        : "cc");

        if (ret)
                smp_mb();

        return ret;
}
#define atomic64_add_negative(a, v)     (atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)                 atomic64_add(1LL, (v))
#define atomic64_inc_return(v)          atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)        (atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)     (atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)                 atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)          atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)        (atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)        atomic64_add_unless((v), 1LL, 0LL)

#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif /* __ASM_ARM_ATOMIC_H */