#ifndef __ASM_ATOMIC_H
#define __ASM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>

#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i) { (i) }

#define atomic_read(v) (*(volatile int *)&(v)->counter)
#define atomic_set(v,i) (((v)->counter) = (i))
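
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * header): a hypothetical event counter showing how ATOMIC_INIT,
 * atomic_read() and atomic_set() fit together.  The names below are made
 * up for the example.
 */
static atomic_t example_events = ATOMIC_INIT(0);

static inline void example_reset_events(void)
{
	atomic_set(&example_events, 0);	/* plain store; no ordering implied */
}

static inline int example_read_events(void)
{
	return atomic_read(&example_events);	/* volatile read of ->counter */
}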
/*
 * AArch64 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_add\n"
"1: ldxr %w0, [%3]\n"
" add %w0, %w0, %w4\n"
" stxr %w1, %w0, [%3]\n"
" cbnz %w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+o" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_add_return\n"
"1: ldaxr %w0, [%3]\n"
" add %w0, %w0, %w4\n"
" stlxr %w1, %w0, [%3]\n"
" cbnz %w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+o" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	return result;
}

static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_sub\n"
"1: ldxr %w0, [%3]\n"
" sub %w0, %w0, %w4\n"
" stxr %w1, %w0, [%3]\n"
" cbnz %w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+o" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_sub_return\n"
"1: ldaxr %w0, [%3]\n"
" sub %w0, %w0, %w4\n"
" stlxr %w1, %w0, [%3]\n"
" cbnz %w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+o" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	return result;
}

static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	unsigned long tmp;
	int oldval;

	asm volatile("// atomic_cmpxchg\n"
"1: ldaxr %w1, [%3]\n"
" cmp %w1, %w4\n"
" b.ne 2f\n"
" stlxr %w0, %w5, [%3]\n"
" cbnz %w0, 1b\n"
"2:"
	: "=&r" (tmp), "=&r" (oldval), "+o" (ptr->counter)
	: "r" (&ptr->counter), "Ir" (old), "r" (new)
	: "cc");

	return oldval;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long tmp, tmp2;

	asm volatile("// atomic_clear_mask\n"
"1: ldxr %0, [%3]\n"
" bic %0, %0, %4\n"
" stxr %w1, %0, [%3]\n"
" cbnz %w1, 1b"
	: "=&r" (tmp), "=&r" (tmp2), "+o" (*addr)
	: "r" (addr), "Ir" (mask)
	: "cc");
}
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
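
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * atomic_cmpxchg() is typically wrapped in a read/compare/retry loop, as in
 * the kernel's add-unless style helpers.  The function name below is
 * hypothetical.
 */
static inline int example_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
		c = old;

	return c != u;	/* non-zero if the addition was performed */
}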
#define atomic_inc(v) atomic_add(1, v)
#define atomic_dec(v) atomic_sub(1, v)

#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v) (atomic_add_return(1, v))
#define atomic_dec_return(v) (atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)

#define smp_mb__before_atomic_dec() smp_mb()
#define smp_mb__after_atomic_dec() smp_mb()
#define smp_mb__before_atomic_inc() smp_mb()
#define smp_mb__after_atomic_inc() smp_mb()
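
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * a typical reference-count release path combining the barrier macros above
 * with atomic_dec_and_test().  'struct example_obj' and example_put() are
 * made-up names; a real caller would free the object when this returns
 * non-zero.
 */
struct example_obj {
	atomic_t refcount;
};

static inline int example_put(struct example_obj *obj)
{
	smp_mb__before_atomic_dec();
	return atomic_dec_and_test(&obj->refcount);	/* true when the last reference is dropped */
}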
/*
 * 64-bit atomic operations.
 */
#define ATOMIC64_INIT(i) { (i) }

#define atomic64_read(v) (*(volatile long long *)&(v)->counter)
#define atomic64_set(v,i) (((v)->counter) = (i))
static inline void atomic64_add(long i, atomic64_t *v)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_add\n"
"1: ldxr %0, [%3]\n"
" add %0, %0, %4\n"
" stxr %w1, %0, [%3]\n"
" cbnz %w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+o" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}

static inline long atomic64_add_return(long i, atomic64_t *v)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_add_return\n"
"1: ldaxr %0, [%3]\n"
" add %0, %0, %4\n"
" stlxr %w1, %0, [%3]\n"
" cbnz %w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+o" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	return result;
}

static inline void atomic64_sub(long i, atomic64_t *v)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_sub\n"
"1: ldxr %0, [%3]\n"
" sub %0, %0, %4\n"
" stxr %w1, %0, [%3]\n"
" cbnz %w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+o" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}

static inline long atomic64_sub_return(long i, atomic64_t *v)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_sub_return\n"
"1: ldaxr %0, [%3]\n"
" sub %0, %0, %4\n"
" stlxr %w1, %0, [%3]\n"
" cbnz %w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+o" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	return result;
}

static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
{
	long oldval;
	unsigned long res;

	asm volatile("// atomic64_cmpxchg\n"
"1: ldaxr %1, [%3]\n"
" cmp %1, %4\n"
" b.ne 2f\n"
" stlxr %w0, %5, [%3]\n"
" cbnz %w0, 1b\n"
"2:"
	: "=&r" (res), "=&r" (oldval), "+o" (ptr->counter)
	: "r" (&ptr->counter), "Ir" (old), "r" (new)
	: "cc");

	return oldval;
}
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_dec_if_positive\n"
"1: ldaxr %0, [%3]\n"
" subs %0, %0, #1\n"
" b.mi 2f\n"
" stlxr %w1, %0, [%3]\n"
" cbnz %w1, 1b\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+o" (v->counter)
	: "r" (&v->counter)
	: "cc");

	return result;
}
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v) atomic64_add(1LL, (v))
#define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v) atomic64_sub(1LL, (v))
#define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
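
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * atomic64_inc_not_zero() expands to atomic64_add_unless(), whose definition
 * is not shown in this listing; it takes a reference only while the count is
 * still non-zero, the usual pattern for lockless lookups.  The function name
 * below is hypothetical.
 */
static inline int example_get_ref(atomic64_t *refcount)
{
	return atomic64_inc_not_zero(refcount);	/* non-zero on success */
}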