spinlock.h (Linux Kernel 3.7.1)
#ifndef _ASM_M32R_SPINLOCK_H
#define _ASM_M32R_SPINLOCK_H

/*
 * linux/include/asm-m32r/spinlock.h
 *
 * M32R version:
 *   Copyright (C) 2001, 2002  Hitoshi Yamamoto
 *   Copyright (C) 2004  Hirokazu Takata <takata at linux-m32r.org>
 */

#include <linux/compiler.h>
#include <linux/atomic.h>
#include <asm/dcache_clear.h>
#include <asm/page.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * (the type definitions are in asm/spinlock_types.h)
 *
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions.  They have a cost.
 */

#define arch_spin_is_locked(x)          (*(volatile int *)(&(x)->slock) <= 0)
#define arch_spin_lock_flags(lock, flags)       arch_spin_lock(lock)
#define arch_spin_unlock_wait(x) \
                do { cpu_relax(); } while (arch_spin_is_locked(x))
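
/*
 * Illustrative note, not part of the original source: lock->slock holds 1
 * when the lock is free and 0 or a negative value when it is held, which is
 * why arch_spin_is_locked() simply tests for <= 0 and arch_spin_unlock_wait()
 * spins with cpu_relax() until that test fails.  A minimal plain-C model of
 * the encoding (ignoring atomicity) might look like this sketch:
 */
#if 0   /* illustrative sketch only, not compiled */
static inline int slock_model_is_locked(int slock)
{
        return slock <= 0;              /* 1 = free, <= 0 = held */
}

static inline void slock_model_unlock_wait(volatile int *slock)
{
        do {
                cpu_relax();            /* mirror arch_spin_unlock_wait() above */
        } while (*slock <= 0);
}
#endif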

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
        int oldval;
        unsigned long tmp1, tmp2;

        /*
         * lock->slock :  =1 : unlock
         *             : <=0 : lock
         * {
         *      oldval = lock->slock; <--+ need atomic operation
         *      lock->slock = 0;      <--+
         * }
         */
        __asm__ __volatile__ (
                "# arch_spin_trylock            \n\t"
                "ldi    %1, #0;                 \n\t"
                "mvfc   %2, psw;                \n\t"
                "clrpsw #0x40 -> nop;           \n\t"
                DCACHE_CLEAR("%0", "r6", "%3")
                "lock   %0, @%3;                \n\t"
                "unlock %1, @%3;                \n\t"
                "mvtc   %2, psw;                \n\t"
                : "=&r" (oldval), "=&r" (tmp1), "=&r" (tmp2)
                : "r" (&lock->slock)
                : "memory"
#ifdef CONFIG_CHIP_M32700_TS1
                , "r6"
#endif  /* CONFIG_CHIP_M32700_TS1 */
        );

        return (oldval > 0);
}
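
/*
 * Illustrative sketch, not part of the original source: the lock/unlock
 * instruction pair in arch_spin_trylock() behaves like a swap of lock->slock
 * with 0, made atomic by the interlocked load/store and the temporarily
 * cleared interrupt-enable bit in the PSW (clrpsw #0x40).  A plain-C model
 * of the intent, with no real atomicity, might read:
 */
#if 0   /* illustrative sketch only, not compiled */
static inline int spin_trylock_model(arch_spinlock_t *lock)
{
        int oldval;

        /* these two statements are a single atomic step on real hardware */
        oldval = lock->slock;
        lock->slock = 0;

        return oldval > 0;      /* 1 meant "free", so we now own the lock */
}
#endif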

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
        unsigned long tmp0, tmp1;

        /*
         * lock->slock :  =1 : unlock
         *             : <=0 : lock
         *
         * for ( ; ; ) {
         *      lock->slock -= 1;  <-- need atomic operation
         *      if (lock->slock == 0) break;
         *      for ( ; lock->slock <= 0 ; );
         * }
         */
        __asm__ __volatile__ (
                "# arch_spin_lock               \n\t"
                ".fillinsn                      \n"
                "1:                             \n\t"
                "mvfc   %1, psw;                \n\t"
                "clrpsw #0x40 -> nop;           \n\t"
                DCACHE_CLEAR("%0", "r6", "%2")
                "lock   %0, @%2;                \n\t"
                "addi   %0, #-1;                \n\t"
                "unlock %0, @%2;                \n\t"
                "mvtc   %1, psw;                \n\t"
                "bltz   %0, 2f;                 \n\t"
                LOCK_SECTION_START(".balign 4 \n\t")
                ".fillinsn                      \n"
                "2:                             \n\t"
                "ld     %0, @%2;                \n\t"
                "bgtz   %0, 1b;                 \n\t"
                "bra    2b;                     \n\t"
                LOCK_SECTION_END
                : "=&r" (tmp0), "=&r" (tmp1)
                : "r" (&lock->slock)
                : "memory"
#ifdef CONFIG_CHIP_M32700_TS1
                , "r6"
#endif  /* CONFIG_CHIP_M32700_TS1 */
        );
}
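
/*
 * Illustrative sketch, not part of the original source: arch_spin_lock()
 * atomically decrements lock->slock.  If the result is 0 the lock was free
 * (it held 1) and is now owned; a negative result diverts to the out-of-line
 * wait loop, which reloads the value until it turns positive and then retries
 * the decrement.  The same control flow in plain (non-atomic) C:
 */
#if 0   /* illustrative sketch only, not compiled */
static inline void spin_lock_model(arch_spinlock_t *lock)
{
        for (;;) {
                lock->slock -= 1;               /* atomic step on real hardware */
                if (lock->slock == 0)
                        break;                  /* took the lock: 1 -> 0 */

                while (lock->slock <= 0)        /* wait for the holder to unlock */
                        cpu_relax();
        }
}
#endif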

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        mb();
        lock->slock = 1;
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * As on x86, read-write locks are implemented as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 *
 * The inline assembly is non-obvious. Think about it.
 *
 * Changed to use the same technique as rw semaphores.  See
 * semaphore.h for details.  -ben
 */

#define arch_read_can_lock(x)   ((int)(x)->lock > 0)

#define arch_write_can_lock(x)  ((x)->lock == RW_LOCK_BIAS)
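
/*
 * Worked example, not part of the original source (assuming the customary
 * RW_LOCK_BIAS value of 0x01000000 from asm/spinlock_types.h):
 *
 *      rw->lock == RW_LOCK_BIAS        free: no readers, no writer
 *      rw->lock == RW_LOCK_BIAS - n    n readers hold it, no writer
 *      rw->lock == 0                   one writer holds it, no readers
 *
 * A reader atomically subtracts 1 and backs the change out if the result
 * went negative (a writer is in); a writer atomically subtracts the whole
 * bias and backs it out if the result is non-zero (readers or another
 * writer are in).  arch_read_can_lock() and arch_write_can_lock() above are
 * the non-atomic forms of those two success tests.
 */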

static inline void arch_read_lock(arch_rwlock_t *rw)
{
        unsigned long tmp0, tmp1;

        /*
         * rw->lock :  >0 : unlock
         *          : <=0 : lock
         *
         * for ( ; ; ) {
         *      rw->lock -= 1;  <-- need atomic operation
         *      if (rw->lock >= 0) break;
         *      rw->lock += 1;  <-- need atomic operation
         *      for ( ; rw->lock <= 0 ; );
         * }
         */
        __asm__ __volatile__ (
                "# read_lock                    \n\t"
                ".fillinsn                      \n"
                "1:                             \n\t"
                "mvfc   %1, psw;                \n\t"
                "clrpsw #0x40 -> nop;           \n\t"
                DCACHE_CLEAR("%0", "r6", "%2")
                "lock   %0, @%2;                \n\t"
                "addi   %0, #-1;                \n\t"
                "unlock %0, @%2;                \n\t"
                "mvtc   %1, psw;                \n\t"
                "bltz   %0, 2f;                 \n\t"
                LOCK_SECTION_START(".balign 4 \n\t")
                ".fillinsn                      \n"
                "2:                             \n\t"
                "clrpsw #0x40 -> nop;           \n\t"
                DCACHE_CLEAR("%0", "r6", "%2")
                "lock   %0, @%2;                \n\t"
                "addi   %0, #1;                 \n\t"
                "unlock %0, @%2;                \n\t"
                "mvtc   %1, psw;                \n\t"
                ".fillinsn                      \n"
                "3:                             \n\t"
                "ld     %0, @%2;                \n\t"
                "bgtz   %0, 1b;                 \n\t"
                "bra    3b;                     \n\t"
                LOCK_SECTION_END
                : "=&r" (tmp0), "=&r" (tmp1)
                : "r" (&rw->lock)
                : "memory"
#ifdef CONFIG_CHIP_M32700_TS1
                , "r6"
#endif  /* CONFIG_CHIP_M32700_TS1 */
        );
}
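
/*
 * Illustrative sketch, not part of the original source: the reader fast path
 * above decrements the counter and keeps the read lock if the result stayed
 * non-negative; otherwise the slow path undoes the decrement and spins until
 * the counter turns positive again.  Modelling rw->lock as a plain int and
 * ignoring atomicity:
 */
#if 0   /* illustrative sketch only, not compiled */
static inline void read_lock_model(int *counter)
{
        for (;;) {
                *counter -= 1;                  /* atomic step on real hardware */
                if (*counter >= 0)
                        break;                  /* no writer: reader admitted */

                *counter += 1;                  /* back the decrement out */
                while (*counter <= 0)           /* wait for the writer to leave */
                        cpu_relax();
        }
}
#endif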

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        unsigned long tmp0, tmp1, tmp2;

        /*
         * rw->lock :  =RW_LOCK_BIAS_STR : unlock
         *          : !=RW_LOCK_BIAS_STR : lock
         *
         * for ( ; ; ) {
         *      rw->lock -= RW_LOCK_BIAS_STR;  <-- need atomic operation
         *      if (rw->lock == 0) break;
         *      rw->lock += RW_LOCK_BIAS_STR;  <-- need atomic operation
         *      for ( ; rw->lock != RW_LOCK_BIAS_STR ; ) ;
         * }
         */
        __asm__ __volatile__ (
                "# write_lock                   \n\t"
                "seth   %1, #high(" RW_LOCK_BIAS_STR "); \n\t"
                "or3    %1, %1, #low(" RW_LOCK_BIAS_STR "); \n\t"
                ".fillinsn                      \n"
                "1:                             \n\t"
                "mvfc   %2, psw;                \n\t"
                "clrpsw #0x40 -> nop;           \n\t"
                DCACHE_CLEAR("%0", "r7", "%3")
                "lock   %0, @%3;                \n\t"
                "sub    %0, %1;                 \n\t"
                "unlock %0, @%3;                \n\t"
                "mvtc   %2, psw;                \n\t"
                "bnez   %0, 2f;                 \n\t"
                LOCK_SECTION_START(".balign 4 \n\t")
                ".fillinsn                      \n"
                "2:                             \n\t"
                "clrpsw #0x40 -> nop;           \n\t"
                DCACHE_CLEAR("%0", "r7", "%3")
                "lock   %0, @%3;                \n\t"
                "add    %0, %1;                 \n\t"
                "unlock %0, @%3;                \n\t"
                "mvtc   %2, psw;                \n\t"
                ".fillinsn                      \n"
                "3:                             \n\t"
                "ld     %0, @%3;                \n\t"
                "beq    %0, %1, 1b;             \n\t"
                "bra    3b;                     \n\t"
                LOCK_SECTION_END
                : "=&r" (tmp0), "=&r" (tmp1), "=&r" (tmp2)
                : "r" (&rw->lock)
                : "memory"
#ifdef CONFIG_CHIP_M32700_TS1
                , "r7"
#endif  /* CONFIG_CHIP_M32700_TS1 */
        );
}
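
/*
 * Illustrative sketch, not part of the original source: the writer subtracts
 * the whole bias; only when the counter held exactly RW_LOCK_BIAS (no
 * readers, no writer) does the result hit zero and the write lock is taken.
 * Otherwise the bias is added back and the CPU spins until the counter
 * returns to its unlocked value.  Modelling rw->lock as a plain int and
 * ignoring atomicity:
 */
#if 0   /* illustrative sketch only, not compiled */
static inline void write_lock_model(int *counter)
{
        for (;;) {
                *counter -= RW_LOCK_BIAS;       /* atomic step on real hardware */
                if (*counter == 0)
                        break;                  /* it was completely free */

                *counter += RW_LOCK_BIAS;       /* back the subtraction out */
                while (*counter != RW_LOCK_BIAS)
                        cpu_relax();
        }
}
#endif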

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        unsigned long tmp0, tmp1;

        __asm__ __volatile__ (
                "# read_unlock                  \n\t"
                "mvfc   %1, psw;                \n\t"
                "clrpsw #0x40 -> nop;           \n\t"
                DCACHE_CLEAR("%0", "r6", "%2")
                "lock   %0, @%2;                \n\t"
                "addi   %0, #1;                 \n\t"
                "unlock %0, @%2;                \n\t"
                "mvtc   %1, psw;                \n\t"
                : "=&r" (tmp0), "=&r" (tmp1)
                : "r" (&rw->lock)
                : "memory"
#ifdef CONFIG_CHIP_M32700_TS1
                , "r6"
#endif  /* CONFIG_CHIP_M32700_TS1 */
        );
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        unsigned long tmp0, tmp1, tmp2;

        __asm__ __volatile__ (
                "# write_unlock                 \n\t"
                "seth   %1, #high(" RW_LOCK_BIAS_STR "); \n\t"
                "or3    %1, %1, #low(" RW_LOCK_BIAS_STR "); \n\t"
                "mvfc   %2, psw;                \n\t"
                "clrpsw #0x40 -> nop;           \n\t"
                DCACHE_CLEAR("%0", "r7", "%3")
                "lock   %0, @%3;                \n\t"
                "add    %0, %1;                 \n\t"
                "unlock %0, @%3;                \n\t"
                "mvtc   %2, psw;                \n\t"
                : "=&r" (tmp0), "=&r" (tmp1), "=&r" (tmp2)
                : "r" (&rw->lock)
                : "memory"
#ifdef CONFIG_CHIP_M32700_TS1
                , "r7"
#endif  /* CONFIG_CHIP_M32700_TS1 */
        );
}

static inline int arch_read_trylock(arch_rwlock_t *lock)
{
        atomic_t *count = (atomic_t *)lock;
        if (atomic_dec_return(count) >= 0)
                return 1;
        atomic_inc(count);
        return 0;
}

static inline int arch_write_trylock(arch_rwlock_t *lock)
{
        atomic_t *count = (atomic_t *)lock;
        if (atomic_sub_and_test(RW_LOCK_BIAS, count))
                return 1;
        atomic_add(RW_LOCK_BIAS, count);
        return 0;
}
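
/*
 * Illustrative note, not part of the original source: the trylock helpers
 * above reuse the same counter arithmetic as the asm routines, but through
 * the generic atomic_t operations.  With the counter at RW_LOCK_BIAS,
 * atomic_dec_return() yields RW_LOCK_BIAS - 1 (still >= 0), so
 * arch_read_trylock() succeeds; with that one reader in,
 * atomic_sub_and_test(RW_LOCK_BIAS, ...) leaves a non-zero result, so
 * arch_write_trylock() adds the bias back and reports failure.
 */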

#define arch_read_lock_flags(lock, flags)       arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags)      arch_write_lock(lock)

#define arch_spin_relax(lock)   cpu_relax()
#define arch_read_relax(lock)   cpu_relax()
#define arch_write_relax(lock)  cpu_relax()

#endif  /* _ASM_M32R_SPINLOCK_H */