Linux Kernel 3.7.1
arch/arm/include/asm/spinlock.h (source listing)
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif

#include <asm/processor.h>

/*
 * sev and wfe are ARMv6K extensions.  Uniprocessor ARMv6 may not have the K
 * extensions, so when running on UP, we have to patch these instructions away.
 */
#define ALT_SMP(smp, up)					\
	"9998:	" smp "\n"					\
	"	.pushsection \".alt.smp.init\", \"a\"\n"	\
	"	.long	9998b\n"				\
	"	" up "\n"					\
	"	.popsection\n"

#ifdef CONFIG_THUMB2_KERNEL
#define SEV		ALT_SMP("sev.w", "nop.w")
/*
 * For Thumb-2, special care is needed to ensure that the conditional WFE
 * instruction really does assemble to exactly 4 bytes (as required by
 * the SMP_ON_UP fixup code).  By itself "wfene" might cause the
 * assembler to insert an extra (16-bit) IT instruction, depending on the
 * presence or absence of neighbouring conditional instructions.
 *
 * To avoid this unpredictability, an appropriate IT is inserted explicitly:
 * the assembler won't change IT instructions which are explicitly present
 * in the input.
 */
#define WFE(cond)	ALT_SMP(		\
	"it " cond "\n\t"			\
	"wfe" cond ".n",			\
						\
	"nop.w"					\
)
#else
#define SEV		ALT_SMP("sev", "nop")
#define WFE(cond)	ALT_SMP("wfe" cond, "nop")
#endif

static inline void dsb_sev(void)
{
#if __LINUX_ARM_ARCH__ >= 7
	__asm__ __volatile__ (
		"dsb\n"
		SEV
	);
#else
	__asm__ __volatile__ (
		"mcr p15, 0, %0, c7, c10, 4\n"
		SEV
		: : "r" (0)
	);
#endif
}
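
/*
 * dsb_sev() pairs with the WFE-based waiting in the lock paths below: the
 * DSB (or its ARMv6 CP15 equivalent, the c7/c10/4 data synchronization
 * barrier operation) makes the updated lock word visible before SEV raises
 * the event that wakes any CPUs sleeping in WFE.
 */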

/*
 * ARMv6 ticket-based spin-locking.
 *
 * A memory barrier is required after we get a lock, and before we
 * release it, because V6 CPUs are assumed to have weakly ordered
 * memory.
 */
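
/*
 * A sketch of the lock word layout this code relies on (the real
 * definitions live in asm/spinlock_types.h, where TICKET_SHIFT is 16):
 * arch_spinlock_t overlays a u32 "slock" with two 16-bit ticket fields.
 * On a little-endian build, "owner" is the low halfword and "next" the
 * high halfword:
 *
 *	 31           16 15            0
 *	+---------------+---------------+
 *	|     next      |     owner     |   == slock
 *	+---------------+---------------+
 *
 * A CPU takes a ticket by atomically adding 1 << TICKET_SHIFT to slock
 * (incrementing "next") and then spins until "owner" equals the ticket it
 * read; the unlock path increments "owner".
 */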

#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;
	u32 newval;
	arch_spinlock_t lockval;

	__asm__ __volatile__(
"1:	ldrex	%0, [%3]\n"
"	add	%1, %0, %4\n"
"	strex	%2, %1, [%3]\n"
"	teq	%2, #0\n"
"	bne	1b"
	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp)
	: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
	: "cc");

	while (lockval.tickets.next != lockval.tickets.owner) {
		wfe();
		lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner);
	}

	smp_mb();
}
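
/*
 * Roughly, the ldrex/strex loop above is the atomic equivalent of the
 * following (non-atomic) C, written out only as an illustration:
 *
 *	lockval = *lock;				// ldrex
 *	newval  = lockval.slock + (1 << TICKET_SHIFT);	// take the next ticket
 *	lock->slock = newval;				// strex, retried on failure
 *
 * lockval still holds the pre-increment value, so lockval.tickets.next is
 * the ticket this CPU drew; it then sleeps in WFE until the owner field
 * catches up, re-reading only the owner halfword each time it is woken.
 */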

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp;
	u32 slock;

	__asm__ __volatile__(
"	ldrex	%0, [%2]\n"
"	subs	%1, %0, %0, ror #16\n"
"	addeq	%0, %0, %3\n"
"	strexeq	%1, %0, [%2]"
	: "=&r" (slock), "=&r" (tmp)
	: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}
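
/*
 * The "subs %1, %0, %0, ror #16" above compares the two ticket halves:
 * rotating the 32-bit lock word by 16 bits swaps owner and next, so the
 * subtraction is zero exactly when next == owner, i.e. the lock is free.
 * For example, with owner == next == 3 the word is 0x00030003, the rotated
 * value is also 0x00030003 and the difference is 0, so addeq bumps "next"
 * and strexeq publishes it.  If the halves differ, or the strex loses the
 * exclusive monitor, tmp ends up non-zero and the trylock fails without
 * retrying.
 */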

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned long tmp;
	u32 slock;

	smp_mb();

	__asm__ __volatile__(
"	mov	%1, #1\n"
"1:	ldrex	%0, [%2]\n"
"	uadd16	%0, %0, %1\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (slock), "=&r" (tmp)
	: "r" (&lock->slock)
	: "cc");

	dsb_sev();
}
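
/*
 * uadd16 adds the two 16-bit halves of its operands independently, so
 * adding the constant 1 bumps the owner halfword without any carry leaking
 * into the next halfword (%1 first carries the constant 1 and is then
 * reused for the strex status).  E.g. releasing with owner == 0xffff
 * leaves next unchanged and wraps owner to 0x0000, which is exactly the
 * wrap-around the ticket scheme expects.  The smp_mb() before the store
 * keeps the critical section from leaking past the unlock, and dsb_sev()
 * wakes any CPUs waiting in WFE.
 */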

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
	return tickets.owner != tickets.next;
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
	return (tickets.next - tickets.owner) > 1;
}
#define arch_spin_is_contended	arch_spin_is_contended
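
/*
 * With the ticket layout, next - owner is the number of CPUs that have
 * drawn a ticket but not yet released the lock: 0 means unlocked, 1 means
 * held with nobody queued, and anything greater than 1 means at least one
 * CPU is spinning behind the holder, which is what is reported as
 * contended.  The #define lets generic spinlock code see that this
 * architecture supplies its own arch_spin_is_contended().
 */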

/*
 * RWLOCKS
 *
 * Write locks are easy - we just set bit 31.  When unlocking, we can
 * just write zero since the lock is exclusively held.
 */
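
/*
 * So the 32-bit rw->lock word is used as follows, a convention the
 * arch_*_can_lock() macros below depend on:
 *
 *	0x00000000	unlocked
 *	0x80000000	write-locked (bit 31 set, no readers possible)
 *	0x00000001..	read-locked, value == number of active readers
 *
 * Readers simply count up from zero, so any value with bit 31 clear and
 * non-zero means one or more readers hold the lock.
 */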

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
	WFE("ne")
"	strexeq	%0, %2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	smp_mb();
}
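
/*
 * arch_write_lock() loops until it observes the lock word as 0: if the
 * ldrex sees a non-zero value (readers or another writer), WFE("ne") puts
 * the CPU to sleep until an unlock path issues dsb_sev(), and the loop
 * retries.  Once the word is 0, strexeq attempts to write 0x80000000; a
 * failed exclusive store also leaves tmp non-zero and simply goes around
 * again.
 */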

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"str	%1, [%0]\n"
	:
	: "r" (&rw->lock), "r" (0)
	: "cc");

	dsb_sev();
}

/* write_can_lock - would write_trylock() succeed? */
#define arch_write_can_lock(x)		((x)->lock == 0)

/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this location.
 *    If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 * Unlocking is similarly hairy.  We may have multiple read locks
 * currently active.  However, we know we won't have any write
 * locks.
 */
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
	WFE("mi")
"	rsbpls	%0, %1, #0\n"
"	bmi	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
}
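
/*
 * Walking through the flag games above:
 *  - "adds" increments the reader count and sets the N flag if the result
 *    is negative, i.e. a writer holds bit 31.
 *  - "strexpl" only attempts the store when the result was non-negative;
 *    tmp2 then holds 0 on success or 1 if the exclusive monitor was lost.
 *  - If a writer held the lock (mi), WFE("mi") sleeps until dsb_sev().
 *  - "rsbpls" computes 0 - tmp2 only in the non-negative case, so a failed
 *    strex turns into a negative result there as well.
 *  - "bmi 1b" therefore retries both on writer contention and on a lost
 *    exclusive store, and falls through only once the count was stored.
 */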

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	smp_mb();

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, #1\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	if (tmp == 0)
		dsb_sev();
}
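
/*
 * On the unlock side, tmp holds the decremented reader count that was
 * actually stored.  Only when it reaches 0 (the last reader has left) is
 * there any point in dsb_sev(): the CPUs that can be sleeping in WFE here
 * are writers in arch_write_lock(), and they cannot make progress until
 * the count drops to zero anyway.
 */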

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2 = 1;

	__asm__ __volatile__(
"	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
	: "=&r" (tmp), "+r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
	return tmp2 == 0;
}
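
/*
 * arch_read_trylock() pre-loads tmp2 with 1 (note the "+r" constraint, so
 * the initial value survives into the asm).  strexpl runs only when the
 * incremented count stayed non-negative, and writes 0 to tmp2 on a
 * successful store; in every other case (a writer held the lock, or the
 * exclusive store failed) tmp2 is still 1, so "tmp2 == 0" is a precise
 * success test with no retry loop.
 */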

/* read_can_lock - would read_trylock() succeed? */
#define arch_read_can_lock(x)		((x)->lock < 0x80000000)

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */