Linux Kernel 3.7.1
spinlock_64.h
/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 *
 * 64-bit SMP ticket spinlocks, allowing only a single CPU anywhere
 * (the type definitions are in asm/spinlock_types.h)
 */

#ifndef _ASM_TILE_SPINLOCK_64_H
#define _ASM_TILE_SPINLOCK_64_H

/* Shifts and masks for the various fields in "lock". */
#define __ARCH_SPIN_CURRENT_SHIFT	17
#define __ARCH_SPIN_NEXT_MASK		0x7fff
#define __ARCH_SPIN_NEXT_OVERFLOW	0x8000

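/*
 * Illustrative sketch, not part of the original header: reading the
 * constants above, bits 0-14 of the lock word hold the "next" ticket,
 * bit 15 flags an overflow of "next", and bits 17-31 hold the
 * "current" ticket.  The hypothetical helper below (the example_*
 * name is ours) shows how a raw lock value decomposes under that
 * reading.
 */
#if 0	/* example only, never compiled */
static inline void example_decompose(u32 val)
{
	u32 next = val & __ARCH_SPIN_NEXT_MASK;		/* bits 0-14  */
	u32 overflow = val & __ARCH_SPIN_NEXT_OVERFLOW;	/* bit 15     */
	u32 owner = val >> __ARCH_SPIN_CURRENT_SHIFT;	/* bits 17-31 */

	/*
	 * e.g. val == (3 << __ARCH_SPIN_CURRENT_SHIFT) | 5 means ticket
	 * 3 currently owns the lock, ticket 4 is waiting, and the next
	 * arrival will take ticket 5.
	 */
}
#endif
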
/*
 * Return the "current" portion of a ticket lock value,
 * i.e. the number that currently owns the lock.
 */
static inline int arch_spin_current(u32 val)
{
	return val >> __ARCH_SPIN_CURRENT_SHIFT;
}

/*
 * Return the "next" portion of a ticket lock value,
 * i.e. the number that the next task to try to acquire the lock will get.
 */
static inline int arch_spin_next(u32 val)
{
	return val & __ARCH_SPIN_NEXT_MASK;
}

/* The lock is locked if a task would have to wait to get it. */
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	u32 val = lock->lock;
	return arch_spin_current(val) != arch_spin_next(val);
}

/* Bump the current ticket so the next task owns the lock. */
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	wmb();  /* guarantee anything modified under the lock is visible */
	__insn_fetchadd4(&lock->lock, 1U << __ARCH_SPIN_CURRENT_SHIFT);
}

void arch_spin_unlock_wait(arch_spinlock_t *lock);

void arch_spin_lock_slow(arch_spinlock_t *lock, u32 val);

/* Grab the "next" ticket number and bump it atomically.
 * If the current ticket is not ours, go to the slow path.
 * We also take the slow path if the "next" value overflows.
 */
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	u32 val = __insn_fetchadd4(&lock->lock, 1);
	u32 ticket = val & (__ARCH_SPIN_NEXT_MASK | __ARCH_SPIN_NEXT_OVERFLOW);
	if (unlikely(arch_spin_current(val) != ticket))
		arch_spin_lock_slow(lock, ticket);
}

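/*
 * Illustrative sketch, not part of the original header: conceptually,
 * arch_spin_lock_slow() spins until the "current" field reaches our
 * ticket.  The real implementation (in the corresponding .c file)
 * also resolves the __ARCH_SPIN_NEXT_OVERFLOW case and backs off
 * between polls; this example_* version shows only the core idea.
 */
#if 0	/* example only, never compiled */
static void example_spin_lock_slow(arch_spinlock_t *lock, u32 my_ticket)
{
	while (arch_spin_current(ACCESS_ONCE(lock->lock)) != my_ticket)
		cpu_relax();	/* busy-wait for our turn */
}
#endif
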
/* Try to get the lock, and return whether we succeeded. */
int arch_spin_trylock(arch_spinlock_t *lock);

/* We cannot take an interrupt after getting a ticket, so don't enable them. */
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * We use fetchadd() for readers, and fetchor() with the sign bit
 * for writers.
 */

#define __WRITE_LOCK_BIT (1 << 31)

static inline int arch_write_val_locked(int val)
{
	return val < 0;  /* Optimize "val & __WRITE_LOCK_BIT". */
}

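/*
 * Illustrative reading, not part of the original header, assuming the
 * scheme described above: the low 31 bits count readers and the sign
 * bit marks a writer.  A few sample lock-word values under that
 * reading:
 *
 *	0x00000000	unlocked
 *	0x00000003	three readers hold the lock
 *	0x80000000	one writer holds the lock
 *
 * Any value with __WRITE_LOCK_BIT set is negative as an int, which is
 * why arch_write_val_locked() can simply test "val < 0".
 */
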
static inline int arch_read_can_lock(arch_rwlock_t *rw)
{
	return !arch_write_val_locked(rw->lock);
}

static inline int arch_write_can_lock(arch_rwlock_t *rw)
{
	return rw->lock == 0;
}

extern void __read_lock_failed(arch_rwlock_t *rw);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	u32 val = __insn_fetchaddgez4(&rw->lock, 1);
	if (unlikely(arch_write_val_locked(val)))
		__read_lock_failed(rw);
}

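/*
 * Illustrative sketch, not part of the original header: fetchaddgez4
 * only performs the add when the result stays non-negative, so a
 * reader's increment quietly fails while a writer holds the sign bit.
 * Conceptually, __read_lock_failed() just retries until the writer is
 * gone; the real implementation also backs off between attempts.
 */
#if 0	/* example only, never compiled */
static void example_read_lock_failed(arch_rwlock_t *rw)
{
	do {
		cpu_relax();	/* give the writer time to finish */
	} while (arch_write_val_locked(__insn_fetchaddgez4(&rw->lock, 1)));
}
#endif
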
extern void __write_lock_failed(arch_rwlock_t *rw, u32 val);

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	u32 val = __insn_fetchor4(&rw->lock, __WRITE_LOCK_BIT);
	if (unlikely(val != 0))
		__write_lock_failed(rw, val);
}

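/*
 * Illustrative sketch, not part of the original header: after the
 * fetchor above we own the write bit unless another writer already
 * held it.  If we own it we only need to wait for the reader count to
 * drain; otherwise we must first win the bit.  The real
 * __write_lock_failed() adds backoff; this example_* version shows
 * only the core idea.
 */
#if 0	/* example only, never compiled */
static void example_write_lock_failed(arch_rwlock_t *rw, u32 val)
{
	while (arch_write_val_locked(val)) {
		/* Another writer owns the bit; retry until we take it. */
		cpu_relax();
		val = __insn_fetchor4(&rw->lock, __WRITE_LOCK_BIT);
	}
	/* The bit is ours; wait for the readers to drain out. */
	while ((ACCESS_ONCE(rw->lock) & ~__WRITE_LOCK_BIT) != 0)
		cpu_relax();
}
#endif
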
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	__insn_mf();
	__insn_fetchadd4(&rw->lock, -1);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__insn_mf();
	__insn_exch4(&rw->lock, 0);  /* Avoid waiting in the write buffer. */
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	return !arch_write_val_locked(__insn_fetchaddgez4(&rw->lock, 1));
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	u32 val = __insn_fetchor4(&rw->lock, __WRITE_LOCK_BIT);
	if (likely(val == 0))
		return 1;
	if (!arch_write_val_locked(val))
		__insn_fetchand4(&rw->lock, ~__WRITE_LOCK_BIT);
	return 0;
}

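/*
 * Illustrative trace, not part of the original header, of the undo
 * case in arch_write_trylock() above: if readers hold the lock (say
 * rw->lock == 2), the fetchor still sets __WRITE_LOCK_BIT but returns
 * the old value 2.  Since that old value had the bit clear, the bit
 * is now ours and must be cleared again with fetchand before we
 * report failure.  If the old value already had the bit set, another
 * writer owns it and we must leave it alone.
 */
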
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#endif /* _ASM_TILE_SPINLOCK_64_H */