Linux Kernel 3.7.1
kernel/semaphore.c
/*
 * Copyright (c) 2008 Intel Corporation
 * Author: Matthew Wilcox <[email protected]>
 *
 * Distributed under the terms of the GNU GPL, version 2
 *
 * This file implements counting semaphores.
 * A counting semaphore may be acquired 'n' times before sleeping.
 * See mutex.c for single-acquisition sleeping locks which enforce
 * rules which allow code to be debugged more easily.
 */

/*
 * Some notes on the implementation:
 *
 * The spinlock controls access to the other members of the semaphore.
 * down_trylock() and up() can be called from interrupt context, so we
 * have to disable interrupts when taking the lock.  It turns out various
 * parts of the kernel expect to be able to use down() on a semaphore in
 * interrupt context when they know it will succeed, so we have to use
 * irqsave variants for down(), down_interruptible() and down_killable()
 * too.
 *
 * The ->count variable represents how many more tasks can acquire this
 * semaphore.  If it's zero, there may be tasks waiting on the wait_list.
 */

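As a rough illustration of the rules described in the comment above, the sketch below (not part of semaphore.c; the slot_sem variable and all my_* functions are hypothetical) initialises a counting semaphore with sema_init(), sleeps on it with down() in process context, and calls only up() from the interrupt handler, which is one of the operations the implementation explicitly allows in interrupt context.

/*
 * Hedged usage sketch, assuming a hypothetical driver that guards four
 * identical command slots with one counting semaphore.
 */
#include <linux/semaphore.h>
#include <linux/interrupt.h>

static struct semaphore slot_sem;		/* hypothetical */

static int my_driver_init(void)			/* hypothetical setup path */
{
	sema_init(&slot_sem, 4);		/* up to four concurrent holders */
	return 0;
}

static void my_submit_command(void)		/* process context */
{
	down(&slot_sem);			/* may sleep until a slot is free */
	/* ... fill and queue a command slot ... */
}

static irqreturn_t my_irq(int irq, void *dev)	/* interrupt context */
{
	up(&slot_sem);				/* completed command frees a slot; up() is IRQ-safe */
	return IRQ_HANDLED;
}
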
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/ftrace.h>

static noinline void __down(struct semaphore *sem);
static noinline int __down_interruptible(struct semaphore *sem);
static noinline int __down_killable(struct semaphore *sem);
static noinline int __down_timeout(struct semaphore *sem, long jiffies);
static noinline void __up(struct semaphore *sem);

void down(struct semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		sem->count--;
	else
		__down(sem);
	raw_spin_unlock_irqrestore(&sem->lock, flags);
}
EXPORT_SYMBOL(down);

int down_interruptible(struct semaphore *sem)
{
	unsigned long flags;
	int result = 0;

	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		sem->count--;
	else
		result = __down_interruptible(sem);
	raw_spin_unlock_irqrestore(&sem->lock, flags);

	return result;
}
EXPORT_SYMBOL(down_interruptible);

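A minimal sketch of how a caller is expected to check down_interruptible()'s return value; the helper function below is hypothetical, but the 0 / -EINTR convention is the one implemented above (the -EINTR value comes from __down_common when a signal is pending).

/* Hedged sketch: propagating -EINTR from down_interruptible(). */
static int my_wait_for_resource(struct semaphore *sem)	/* hypothetical helper */
{
	int ret = down_interruptible(sem);

	if (ret)			/* -EINTR: a signal arrived before we got the semaphore */
		return ret;
	/* ... protected work ... */
	up(sem);
	return 0;
}
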
int down_killable(struct semaphore *sem)
{
	unsigned long flags;
	int result = 0;

	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		sem->count--;
	else
		result = __down_killable(sem);
	raw_spin_unlock_irqrestore(&sem->lock, flags);

	return result;
}
EXPORT_SYMBOL(down_killable);

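down_killable() uses TASK_KILLABLE (see __down_killable below), so the sleep is broken only by a fatal signal rather than by any signal. A hedged sketch with a hypothetical caller:

/* Hedged sketch: down_killable() is interrupted only by fatal signals. */
static int my_long_setup(struct semaphore *sem)		/* hypothetical */
{
	if (down_killable(sem))
		return -EINTR;	/* the task is being killed; stop rather than block its exit */
	/* ... long-running work that should still honour SIGKILL ... */
	up(sem);
	return 0;
}
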
int down_trylock(struct semaphore *sem)
{
	unsigned long flags;
	int count;

	raw_spin_lock_irqsave(&sem->lock, flags);
	count = sem->count - 1;
	if (likely(count >= 0))
		sem->count = count;
	raw_spin_unlock_irqrestore(&sem->lock, flags);

	return (count < 0);
}
EXPORT_SYMBOL(down_trylock);

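Note the return convention visible in the code above: down_trylock() returns 0 when the semaphore has been acquired and a non-zero value when it has not, which is the opposite of spin_trylock() and mutex_trylock(). A hedged sketch with a hypothetical caller:

/* Hedged sketch: 0 from down_trylock() means the semaphore was taken. */
static int my_try_start(struct semaphore *sem)	/* hypothetical; may run in IRQ context */
{
	if (down_trylock(sem))
		return -EBUSY;	/* contended: count was already zero, nothing was taken */
	/* ... got the semaphore without sleeping ... */
	up(sem);
	return 0;
}
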
int down_timeout(struct semaphore *sem, long jiffies)
{
	unsigned long flags;
	int result = 0;

	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		sem->count--;
	else
		result = __down_timeout(sem, jiffies);
	raw_spin_unlock_irqrestore(&sem->lock, flags);

	return result;
}
EXPORT_SYMBOL(down_timeout);

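The timeout argument is in jiffies, and the -ETIME result comes from the timed_out path of __down_common below. A hedged sketch of a bounded wait, with a hypothetical caller converting milliseconds to jiffies:

/* Hedged sketch: bounded wait using msecs_to_jiffies(). */
#include <linux/jiffies.h>

static int my_wait_up_to_100ms(struct semaphore *sem)	/* hypothetical */
{
	int ret = down_timeout(sem, msecs_to_jiffies(100));

	if (ret)			/* -ETIME: the semaphore was not released in time */
		return ret;
	/* ... */
	up(sem);
	return 0;
}
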
void up(struct semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(list_empty(&sem->wait_list)))
		sem->count++;
	else
		__up(sem);
	raw_spin_unlock_irqrestore(&sem->lock, flags);
}
EXPORT_SYMBOL(up);

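Unlike a mutex, a semaphore has no owner, so up() may legitimately be called from a different task, or from interrupt context, than the one that called down(). A hedged sketch of that signalling pattern, with hypothetical names:

/* Hedged sketch: a semaphore used as a wake-up signal between contexts. */
static struct semaphore my_done_sem;	/* hypothetical; sema_init(&my_done_sem, 0) at setup */

static int my_waiter_thread(void *data)	/* hypothetical kthread */
{
	down(&my_done_sem);	/* sleeps until some other context calls up() */
	/* ... handle the event ... */
	return 0;
}

static void my_event_notify(void)	/* may be called from another task or an ISR */
{
	up(&my_done_sem);	/* no ownership rule: the notifier never called down() */
}
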
/* Functions for the contended case */

struct semaphore_waiter {
	struct list_head list;
	struct task_struct *task;
	int up;
};

/*
 * Because this function is inlined, the 'state' parameter will be
 * constant, and thus optimised away by the compiler.  Likewise the
 * 'timeout' parameter for the cases without timeouts.
 */
static inline int __sched __down_common(struct semaphore *sem, long state,
								long timeout)
{
	struct task_struct *task = current;
	struct semaphore_waiter waiter;

	list_add_tail(&waiter.list, &sem->wait_list);
	waiter.task = task;
	waiter.up = 0;

	for (;;) {
		if (signal_pending_state(state, task))
			goto interrupted;
		if (timeout <= 0)
			goto timed_out;
		__set_task_state(task, state);
		raw_spin_unlock_irq(&sem->lock);
		timeout = schedule_timeout(timeout);
		raw_spin_lock_irq(&sem->lock);
		if (waiter.up)
			return 0;
	}

 timed_out:
	list_del(&waiter.list);
	return -ETIME;

 interrupted:
	list_del(&waiter.list);
	return -EINTR;
}

static noinline void __sched __down(struct semaphore *sem)
{
	__down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_interruptible(struct semaphore *sem)
{
	return __down_common(sem, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_killable(struct semaphore *sem)
{
	return __down_common(sem, TASK_KILLABLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_timeout(struct semaphore *sem, long jiffies)
{
	return __down_common(sem, TASK_UNINTERRUPTIBLE, jiffies);
}

static noinline void __sched __up(struct semaphore *sem)
{
	struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
						struct semaphore_waiter, list);
	list_del(&waiter->list);
	waiter->up = 1;
	wake_up_process(waiter->task);
}