Linux Kernel 3.7.1
local.h
#ifndef _ASM_X86_LOCAL_H
#define _ASM_X86_LOCAL_H

#include <linux/percpu.h>

#include <linux/atomic.h>
#include <asm/asm.h>

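/*
 * local_t wraps a signed long that is only ever updated by its owning CPU
 * (typically as a per-CPU variable), so the operations below can skip the
 * lock prefix that the corresponding atomic_long_t operations would need.
 */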
typedef struct {
	atomic_long_t a;
} local_t;

#define LOCAL_INIT(i)	{ ATOMIC_LONG_INIT(i) }

#define local_read(l)	atomic_long_read(&(l)->a)
#define local_set(l, i)	atomic_long_set(&(l)->a, (i))

static inline void local_inc(local_t *l)
{
	asm volatile(_ASM_INC "%0"
		     : "+m" (l->a.counter));
}

static inline void local_dec(local_t *l)
{
	asm volatile(_ASM_DEC "%0"
		     : "+m" (l->a.counter));
}

static inline void local_add(long i, local_t *l)
{
	asm volatile(_ASM_ADD "%1,%0"
		     : "+m" (l->a.counter)
		     : "ir" (i));
}

static inline void local_sub(long i, local_t *l)
{
	asm volatile(_ASM_SUB "%1,%0"
		     : "+m" (l->a.counter)
		     : "ir" (i));
}

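/**
 * local_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @l: pointer to type local_t
 *
 * Atomically subtracts @i from @l and
 * returns true if the result is zero, or false for all
 * other cases.
 */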
static inline int local_sub_and_test(long i, local_t *l)
{
	unsigned char c;

	asm volatile(_ASM_SUB "%2,%0; sete %1"
		     : "+m" (l->a.counter), "=qm" (c)
		     : "ir" (i) : "memory");
	return c;
}

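/**
 * local_dec_and_test - decrement and test
 * @l: pointer to type local_t
 *
 * Atomically decrements @l by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */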
static inline int local_dec_and_test(local_t *l)
{
	unsigned char c;

	asm volatile(_ASM_DEC "%0; sete %1"
		     : "+m" (l->a.counter), "=qm" (c)
		     : : "memory");
	return c != 0;
}

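/**
 * local_inc_and_test - increment and test
 * @l: pointer to type local_t
 *
 * Atomically increments @l by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */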
static inline int local_inc_and_test(local_t *l)
{
	unsigned char c;

	asm volatile(_ASM_INC "%0; sete %1"
		     : "+m" (l->a.counter), "=qm" (c)
		     : : "memory");
	return c != 0;
}

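/**
 * local_add_negative - add and test if negative
 * @i: integer value to add
 * @l: pointer to type local_t
 *
 * Atomically adds @i to @l and returns true
 * if the result is negative, or false when
 * the result is greater than or equal to zero.
 */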
static inline int local_add_negative(long i, local_t *l)
{
	unsigned char c;

	asm volatile(_ASM_ADD "%2,%0; sets %1"
		     : "+m" (l->a.counter), "=qm" (c)
		     : "ir" (i) : "memory");
	return c;
}

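/**
 * local_add_return - add and return
 * @i: integer value to add
 * @l: pointer to type local_t
 *
 * Atomically adds @i to @l and returns @i + @l
 */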
static inline long local_add_return(long i, local_t *l)
{
	long __i;
#ifdef CONFIG_M386
	unsigned long flags;
	if (unlikely(boot_cpu_data.x86 <= 3))
		goto no_xadd;
#endif
	/* Modern 486+ processor */
	__i = i;
	asm volatile(_ASM_XADD "%0, %1;"
		     : "+r" (i), "+m" (l->a.counter)
		     : : "memory");
	return i + __i;

#ifdef CONFIG_M386
no_xadd: /* Legacy 386 processor */
	local_irq_save(flags);
	__i = local_read(l);
	local_set(l, i + __i);
	local_irq_restore(flags);
	return i + __i;
#endif
}

static inline long local_sub_return(long i, local_t *l)
{
	return local_add_return(-i, l);
}

#define local_inc_return(l)	(local_add_return(1, l))
#define local_dec_return(l)	(local_sub_return(1, l))

#define local_cmpxchg(l, o, n) \
	(cmpxchg_local(&((l)->a.counter), (o), (n)))
/* xchg on a memory operand always implies a lock prefix, so this is not cheaper than atomic xchg */
#define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))

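/**
 * local_add_unless - add unless the number is a given value
 * @l: pointer of type local_t
 * @a: the amount to add to l...
 * @u: ...unless l is equal to u.
 *
 * Atomically adds @a to @l, so long as it was not @u.
 * Returns non-zero if @l was not @u, and zero otherwise.
 */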
#define local_add_unless(l, a, u)			\
({							\
	long c, old;					\
	c = local_read((l));				\
	for (;;) {					\
		if (unlikely(c == (u)))			\
			break;				\
		old = local_cmpxchg((l), c, c + (a));	\
		if (likely(old == c))			\
			break;				\
		c = old;				\
	}						\
	c != (u);					\
})
#define local_inc_not_zero(l) local_add_unless((l), 1, 0)

/* On x86_32, these are no better than the atomic variants.
 * On x86-64 these are better than the atomic variants on SMP kernels
 * because they don't use a lock prefix.
 */
#define __local_inc(l)		local_inc(l)
#define __local_dec(l)		local_dec(l)
#define __local_add(i, l)	local_add((i), (l))
#define __local_sub(i, l)	local_sub((i), (l))

#endif /* _ASM_X86_LOCAL_H */
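
As a rough usage sketch (not part of this header; it assumes the standard per-CPU helpers from <linux/percpu.h> such as DEFINE_PER_CPU, this_cpu_ptr, per_cpu and for_each_possible_cpu), a local_t is normally declared per CPU, written only by its owning CPU, and summed across CPUs by a reader:

/* Per-CPU event counter built on local_t (illustrative only). */
#include <linux/percpu.h>
#include <asm/local.h>

static DEFINE_PER_CPU(local_t, hits) = LOCAL_INIT(0);

/* Writer: runs with preemption disabled, so it only touches this CPU's copy. */
static void record_hit(void)
{
	local_inc(this_cpu_ptr(&hits));
}

/* Reader: sums every CPU's counter; the total is approximate while writers run. */
static long total_hits(void)
{
	long sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += local_read(&per_cpu(hits, cpu));
	return sum;
}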