#ifndef ASM_X86_CMPXCHG_H
#define ASM_X86_CMPXCHG_H

#include <linux/compiler.h>
/*
 * Operand-size selectors used by the size switches below.  On 32-bit
 * kernels the quadword case is set to -1: sizeof() can never return -1,
 * so the __X86_CASE_Q arms become provably dead code, which the
 * compiler eliminates — leaving the *_wrong_size() calls to trigger a
 * link-time error only when a genuinely unsupported size is used.
 */
#define __X86_CASE_B	1
#define __X86_CASE_W	2
#define __X86_CASE_L	4
#ifdef CONFIG_X86_64
#define __X86_CASE_Q	8
#else
#define	__X86_CASE_Q	-1		/* sizeof will never return -1 */
#endif
/*
 * An exchange-type operation: applies "op" (xchg, xadd, ...) to *ptr
 * with operand "arg" and returns the value *ptr held before the
 * operation.  "lock" is the bus-lock prefix string ("" for none).
 * The size switch picks the correct operand-width instruction; an
 * unsupported size falls through to __<op>_wrong_size(), which is
 * deliberately undefined so it produces a link-time error.
 */
#define __xchg_op(ptr, arg, op, lock)					\
	({								\
	        __typeof__ (*(ptr)) __ret = (arg);			\
		switch (sizeof(*(ptr))) {				\
		case __X86_CASE_B:					\
			asm volatile (lock #op "b %b0, %1\n"		\
				      : "+q" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		case __X86_CASE_W:					\
			asm volatile (lock #op "w %w0, %1\n"		\
				      : "+r" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		case __X86_CASE_L:					\
			asm volatile (lock #op "l %0, %1\n"		\
				      : "+r" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		case __X86_CASE_Q:					\
			asm volatile (lock #op "q %q0, %1\n"		\
				      : "+r" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		default:						\
			__ ## op ## _wrong_size();			\
		}							\
		__ret;							\
	})
/*
 * Note: no "lock" prefix even on SMP: xchg always implies a locked
 * bus cycle anyway.  Since this is generally used to protect other
 * memory information, the underlying asm uses "asm volatile" and a
 * "memory" clobber to prevent the compiler from moving accesses
 * across it.
 */
#define xchg(ptr, v)	__xchg_op((ptr), (v), xchg, "")
/*
 * Atomic compare and exchange.  Compare OLD with *MEM; if identical,
 * store NEW in *MEM.  Returns the initial value of *MEM — success is
 * indicated by comparing the return value with OLD.  "lock" is the
 * bus-lock prefix string; "size" selects the operand width.  An
 * unsupported size resolves to __cmpxchg_wrong_size(), an undefined
 * symbol that forces a link-time error.
 */
#define __raw_cmpxchg(ptr, old, new, size, lock)			\
({									\
	__typeof__(*(ptr)) __ret;					\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	switch (size) {							\
	case __X86_CASE_B:						\
	{								\
		volatile u8 *__ptr = (volatile u8 *)(ptr);		\
		asm volatile(lock "cmpxchgb %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "q" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_W:						\
	{								\
		volatile u16 *__ptr = (volatile u16 *)(ptr);		\
		asm volatile(lock "cmpxchgw %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_L:						\
	{								\
		volatile u32 *__ptr = (volatile u32 *)(ptr);		\
		asm volatile(lock "cmpxchgl %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_Q:						\
	{								\
		volatile u64 *__ptr = (volatile u64 *)(ptr);		\
		asm volatile(lock "cmpxchgq %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	__ret;								\
})
/* Locked on SMP (LOCK_PREFIX patches to nothing on UP kernels). */
#define __cmpxchg(ptr, old, new, size)					\
	__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)

/* Always locked, even on UP — for synchronizing with hypervisors/devices. */
#define __sync_cmpxchg(ptr, old, new, size)				\
	__raw_cmpxchg((ptr), (old), (new), (size), "lock; ")

/* Never locked — CPU-local data only. */
#define __cmpxchg_local(ptr, old, new, size)				\
	__raw_cmpxchg((ptr), (old), (new), (size), "")
141 # include <asm/cmpxchg_32.h>
143 # include <asm/cmpxchg_64.h>
/*
 * Public entry points, only available when the sub-arch header above
 * advertises native cmpxchg support.  The operand size is derived from
 * the pointed-to type.
 */
#ifdef __HAVE_ARCH_CMPXCHG
#define cmpxchg(ptr, old, new)						\
	__cmpxchg(ptr, old, new, sizeof(*(ptr)))

#define sync_cmpxchg(ptr, old, new)					\
	__sync_cmpxchg(ptr, old, new, sizeof(*(ptr)))

#define cmpxchg_local(ptr, old, new)					\
	__cmpxchg_local(ptr, old, new, sizeof(*(ptr)))
#endif
/*
 * xadd() adds "inc" to "*ptr" and atomically returns the previous
 * value of "*ptr".
 *
 * xadd() is locked when multiple CPUs are online
 * xadd_sync() is always locked
 * xadd_local() is never locked
 */
#define __xadd(ptr, inc, lock)	__xchg_op((ptr), (inc), xadd, lock)
#define xadd(ptr, inc)		__xadd((ptr), (inc), LOCK_PREFIX)
#define xadd_sync(ptr, inc)	__xadd((ptr), (inc), "lock; ")
#define xadd_local(ptr, inc)	__xadd((ptr), (inc), "")
/*
 * __add() adds "inc" to "*ptr" with the given lock prefix, returning
 * "inc" itself (unlike xadd, the old memory value is not fetched).
 * The size switch picks the right-width add instruction; unsupported
 * sizes resolve to __add_wrong_size(), an undefined symbol that forces
 * a link-time error.
 */
#define __add(ptr, inc, lock)						\
	({								\
	        __typeof__ (*(ptr)) __ret = (inc);			\
		switch (sizeof(*(ptr))) {				\
		case __X86_CASE_B:					\
			asm volatile (lock "addb %b1, %0\n"		\
				      : "+m" (*(ptr)) : "qi" (inc)	\
				      : "memory", "cc");		\
			break;						\
		case __X86_CASE_W:					\
			asm volatile (lock "addw %w1, %0\n"		\
				      : "+m" (*(ptr)) : "ri" (inc)	\
				      : "memory", "cc");		\
			break;						\
		case __X86_CASE_L:					\
			asm volatile (lock "addl %1, %0\n"		\
				      : "+m" (*(ptr)) : "ri" (inc)	\
				      : "memory", "cc");		\
			break;						\
		case __X86_CASE_Q:					\
			asm volatile (lock "addq %1, %0\n"		\
				      : "+m" (*(ptr)) : "ri" (inc)	\
				      : "memory", "cc");		\
			break;						\
		default:						\
			__add_wrong_size();				\
		}							\
		__ret;							\
	})

/*
 * add_smp() is locked when multiple CPUs are online
 * add_sync() is always locked
 */
#define add_smp(ptr, inc)	__add((ptr), (inc), LOCK_PREFIX)
#define add_sync(ptr, inc)	__add((ptr), (inc), "lock; ")
/*
 * Double-word compare-and-exchange (cmpxchg8b/cmpxchg16b) on two
 * adjacent, naturally-aligned longs.  Returns true (via sete) iff the
 * pair {*p1, *p2} matched {o1, o2} and was replaced by {n1, n2}.
 * "%c4b" splices operand 4 (2*sizeof(long)) into the mnemonic so the
 * same template emits cmpxchg8b on 32-bit and cmpxchg16b on 64-bit.
 * The BUILD_BUG_ON/VM_BUG_ON checks enforce the word-size and
 * adjacency/alignment requirements of the instruction.
 */
#define __cmpxchg_double(pfx, p1, p2, o1, o2, n1, n2)			\
({									\
	bool __ret;							\
	__typeof__(*(p1)) __old1 = (o1), __new1 = (n1);			\
	__typeof__(*(p2)) __old2 = (o2), __new2 = (n2);			\
	BUILD_BUG_ON(sizeof(*(p1)) != sizeof(long));			\
	BUILD_BUG_ON(sizeof(*(p2)) != sizeof(long));			\
	VM_BUG_ON((unsigned long)(p1) % (2 * sizeof(long)));		\
	VM_BUG_ON((unsigned long)((p1) + 1) != (unsigned long)(p2));	\
	asm volatile(pfx "cmpxchg%c4b %2; sete %0"			\
		     : "=a" (__ret), "+d" (__old2),			\
		       "+m" (*(p1)), "+m" (*(p2))			\
		     : "i" (2 * sizeof(long)), "a" (__old1),		\
		       "b" (__new1), "c" (__new2));			\
	__ret;								\
})

#define cmpxchg_double(p1, p2, o1, o2, n1, n2) \
	__cmpxchg_double(LOCK_PREFIX, p1, p2, o1, o2, n1, n2)

#define cmpxchg_double_local(p1, p2, o1, o2, n1, n2) \
	__cmpxchg_double(, p1, p2, o1, o2, n1, n2)