Linux Kernel 3.7.1
mutex.c
/*
 * kernel/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <[email protected]>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.)
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
        atomic_set(&lock->count, 1);
        spin_lock_init(&lock->wait_lock);
        INIT_LIST_HEAD(&lock->wait_list);
        mutex_clear_owner(lock);

        debug_mutex_init(lock, name, key);
}

EXPORT_SYMBOL(__mutex_init);
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count);

void __sched mutex_lock(struct mutex *lock)
{
        might_sleep();
        /*
         * The locking fastpath is the 1->0 transition from
         * 'unlocked' into 'locked' state.
         */
        __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
        mutex_set_owner(lock);
}

EXPORT_SYMBOL(mutex_lock);
#endif
static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);

void __sched mutex_unlock(struct mutex *lock)
{
        /*
         * The unlocking fastpath is the 0->1 transition from 'locked'
         * into 'unlocked' state:
         */
#ifndef CONFIG_DEBUG_MUTEXES
        /*
         * When debugging is enabled we must not clear the owner too early;
         * the slow path is always taken and clears the owner field itself,
         * after verifying that it was indeed current.
         */
        mutex_clear_owner(lock);
#endif
        __mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);

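/*
 * Editor's illustrative sketch -- not part of kernel/mutex.c. It shows the
 * typical caller-side use of the API implemented above; the structure and
 * function names are hypothetical, only the mutex_*() calls are real
 * (mutex_init() is the usual wrapper that ends up in __mutex_init()).
 */
struct example_dev {
        struct mutex    lock;
        unsigned long   events;
};

static void example_dev_setup(struct example_dev *dev)
{
        mutex_init(&dev->lock);
        dev->events = 0;
}

static void example_dev_record_event(struct example_dev *dev)
{
        /* mutex_lock() may sleep, so this must run in process context */
        mutex_lock(&dev->lock);
        dev->events++;
        mutex_unlock(&dev->lock);
}
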
/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                    struct lockdep_map *nest_lock, unsigned long ip)
{
        struct task_struct *task = current;
        struct mutex_waiter waiter;
        unsigned long flags;

        preempt_disable();
        mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
        /*
         * Optimistic spinning.
         *
         * We try to spin for acquisition when we find that there are no
         * pending waiters and the lock owner is currently running on a
         * (different) CPU.
         *
         * The rationale is that if the lock owner is running, it is likely to
         * release the lock soon.
         *
         * Since this needs the lock owner, and this mutex implementation
         * doesn't track the owner atomically in the lock field, we need to
         * track it non-atomically.
         *
         * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
         * to serialize everything.
         */

        for (;;) {
                struct task_struct *owner;

                /*
                 * If there's an owner, wait for it to either
                 * release the lock or go to sleep.
                 */
                owner = ACCESS_ONCE(lock->owner);
                if (owner && !mutex_spin_on_owner(lock, owner))
                        break;

                if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {
                        lock_acquired(&lock->dep_map, ip);
                        mutex_set_owner(lock);
                        preempt_enable();
                        return 0;
                }

                /*
                 * When there's no owner, we might have preempted between the
                 * owner acquiring the lock and setting the owner field. If
                 * we're an RT task, that will live-lock because we won't let
                 * the owner complete.
                 */
                if (!owner && (need_resched() || rt_task(task)))
                        break;

                /*
                 * The cpu_relax() call is a compiler barrier which forces
                 * everything in this loop to be re-loaded. We don't need
                 * memory barriers as we'll eventually observe the right
                 * values at the cost of a few extra spins.
                 */
                arch_mutex_cpu_relax();
        }
#endif
        spin_lock_mutex(&lock->wait_lock, flags);

        debug_mutex_lock_common(lock, &waiter);
        debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

        /* add waiting tasks to the end of the waitqueue (FIFO): */
        list_add_tail(&waiter.list, &lock->wait_list);
        waiter.task = task;

        if (atomic_xchg(&lock->count, -1) == 1)
                goto done;

        lock_contended(&lock->dep_map, ip);

        for (;;) {
                /*
                 * Let's try to take the lock again - this is needed even if
                 * we get here for the first time (shortly after failing to
                 * acquire the lock), to make sure that we get a wakeup once
                 * it's unlocked. Later on, if we sleep, this is the
                 * operation that gives us the lock. We xchg it to -1, so
                 * that when we release the lock, we properly wake up the
                 * other waiters:
                 */
                if (atomic_xchg(&lock->count, -1) == 1)
                        break;

                /*
                 * Got a signal? (This code gets eliminated in the
                 * TASK_UNINTERRUPTIBLE case.)
                 */
                if (unlikely(signal_pending_state(state, task))) {
                        mutex_remove_waiter(lock, &waiter,
                                            task_thread_info(task));
                        mutex_release(&lock->dep_map, 1, ip);
                        spin_unlock_mutex(&lock->wait_lock, flags);

                        debug_mutex_free_waiter(&waiter);
                        preempt_enable();
                        return -EINTR;
                }
                __set_task_state(task, state);

                /* didn't get the lock, go to sleep: */
                spin_unlock_mutex(&lock->wait_lock, flags);
                schedule_preempt_disabled();
                spin_lock_mutex(&lock->wait_lock, flags);
        }

done:
        lock_acquired(&lock->dep_map, ip);
        /* got the lock - rejoice! */
        mutex_remove_waiter(lock, &waiter, current_thread_info());
        mutex_set_owner(lock);

        /* set it to 0 if there are no waiters left: */
        if (likely(list_empty(&lock->wait_list)))
                atomic_set(&lock->count, 0);

        spin_unlock_mutex(&lock->wait_lock, flags);

        debug_mutex_free_waiter(&waiter);
        preempt_enable();

        return 0;
}

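/*
 * Editor's illustrative sketch -- not part of kernel/mutex.c. The slowpath
 * above revolves around the ->count encoding: 1 means unlocked, 0 means
 * locked with no waiters, and a negative value means locked with possible
 * waiters (which is why every waiter xchg()s the count to -1 before
 * sleeping, forcing the eventual unlock into the wakeup slowpath). The
 * helper below is hypothetical and only names those states.
 */
static inline const char *example_mutex_count_state(int count)
{
        if (count == 1)
                return "unlocked";
        if (count == 0)
                return "locked, no waiters";
        return "locked, waiters possible";      /* count < 0 */
}
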
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
        might_sleep();
        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
        might_sleep();
        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}

EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
        might_sleep();
        return __mutex_lock_common(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
        might_sleep();
        return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
                                   subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
#endif

/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);
        unsigned long flags;

        spin_lock_mutex(&lock->wait_lock, flags);
        mutex_release(&lock->dep_map, nested, _RET_IP_);
        debug_mutex_unlock(lock);

        /*
         * Some architectures leave the lock unlocked in the fastpath failure
         * case, others need to leave it locked. In the latter case we have
         * to unlock it here.
         */
        if (__mutex_slowpath_needs_to_unlock())
                atomic_set(&lock->count, 1);

        if (!list_empty(&lock->wait_list)) {
                /* get the first entry from the wait-list: */
                struct mutex_waiter *waiter =
                                list_entry(lock->wait_list.next,
                                           struct mutex_waiter, list);

                debug_mutex_wake_waiter(lock, waiter);

                wake_up_process(waiter->task);
        }

        spin_unlock_mutex(&lock->wait_lock, flags);
}

/*
 * Release the lock, slowpath:
 */
static __used noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
        __mutex_unlock_common_slowpath(lock_count, 1);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count);

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count);

int __sched mutex_lock_interruptible(struct mutex *lock)
{
        int ret;

        might_sleep();
        ret = __mutex_fastpath_lock_retval
                        (&lock->count, __mutex_lock_interruptible_slowpath);
        if (!ret)
                mutex_set_owner(lock);

        return ret;
}

EXPORT_SYMBOL(mutex_lock_interruptible);

int __sched mutex_lock_killable(struct mutex *lock)
{
        int ret;

        might_sleep();
        ret = __mutex_fastpath_lock_retval
                        (&lock->count, __mutex_lock_killable_slowpath);
        if (!ret)
                mutex_set_owner(lock);

        return ret;
}
EXPORT_SYMBOL(mutex_lock_killable);

static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);

        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);

        return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);

        return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}
#endif

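/*
 * Editor's illustrative sketch -- not part of kernel/mutex.c. Unlike
 * mutex_lock(), the interruptible and killable variants can fail, so their
 * return value must be checked; the function name below is hypothetical.
 */
static int example_wait_for_resource(struct mutex *resource_lock)
{
        int ret;

        ret = mutex_lock_interruptible(resource_lock);
        if (ret)
                return ret;     /* -EINTR: a signal arrived while sleeping */

        /* ... touch the protected resource ... */

        mutex_unlock(resource_lock);
        return 0;
}
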
/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);
        unsigned long flags;
        int prev;

        spin_lock_mutex(&lock->wait_lock, flags);

        prev = atomic_xchg(&lock->count, -1);
        if (likely(prev == 1)) {
                mutex_set_owner(lock);
                mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
        }

        /* Set it back to 0 if there are no waiters: */
        if (likely(list_empty(&lock->wait_list)))
                atomic_set(&lock->count, 0);

        spin_unlock_mutex(&lock->wait_lock, flags);

        return prev == 1;
}

int __sched mutex_trylock(struct mutex *lock)
{
        int ret;

        ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
        if (ret)
                mutex_set_owner(lock);

        return ret;
}
EXPORT_SYMBOL(mutex_trylock);

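/*
 * Editor's illustrative sketch -- not part of kernel/mutex.c.
 * mutex_trylock() returns 1 when the lock was acquired and 0 on contention,
 * and it never sleeps, so it suits paths that must not block. The function
 * name below is hypothetical.
 */
static int example_try_flush(struct mutex *flush_lock)
{
        if (!mutex_trylock(flush_lock))
                return 0;       /* someone else holds the lock; try again later */

        /* ... do the work that needed the lock ... */

        mutex_unlock(flush_lock);
        return 1;
}
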
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
        /* dec if we can't possibly hit 0 */
        if (atomic_add_unless(cnt, -1, 1))
                return 0;
        /* we might hit 0, so take the lock */
        mutex_lock(lock);
        if (!atomic_dec_and_test(cnt)) {
                /* when we actually did the dec, we didn't hit 0 */
                mutex_unlock(lock);
                return 0;
        }
        /* we hit 0, and we hold the lock */
        return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
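/*
 * Editor's illustrative sketch -- not part of kernel/mutex.c. A typical
 * use of atomic_dec_and_mutex_lock() is refcounted teardown: only the
 * caller that drops the last reference gets the mutex and may unlink and
 * free the object. The structure and function names are hypothetical and
 * assume <linux/list.h> and <linux/slab.h>.
 */
struct example_obj {
        struct list_head        node;
        atomic_t                refcount;
};

static void example_put_object(struct example_obj *obj, struct mutex *list_lock)
{
        if (!atomic_dec_and_mutex_lock(&obj->refcount, list_lock))
                return;         /* not the last reference */

        /* last reference dropped: unlink and free while holding list_lock */
        list_del(&obj->node);
        mutex_unlock(list_lock);
        kfree(obj);
}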