#include <linux/sched.h>
#include <linux/export.h>
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif
	mutex_clear_owner(lock);
#ifndef CONFIG_DEBUG_LOCK_ALLOC
__mutex_lock_slowpath(atomic_t *lock_count);
	mutex_set_owner(lock);
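/*
 * Hedged sketch, not part of this file: with the asm-generic
 * atomic-decrement style fastpath, locking is roughly one atomic
 * decrement, and only when the count does not fall from 1 to 0 is the
 * registered slowpath invoked.  example_fastpath_lock() is an
 * illustrative name, not a kernel symbol.
 */
static inline void
example_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	if (unlikely(atomic_dec_return(count) < 0))
		fail_fn(count);
}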
#ifndef CONFIG_DEBUG_MUTEXES
	/* with debugging on, the slowpath clears the owner after checking it */
	mutex_clear_owner(lock);
#endif
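/*
 * Usage sketch (assumption, not from this file): the canonical
 * lock/unlock pairing around a critical section.  example_lock,
 * example_counter and example_update() are illustrative names.
 */
static DEFINE_MUTEX(example_lock);
static int example_counter;

static void example_update(void)
{
	mutex_lock(&example_lock);
	example_counter++;		/* protected by example_lock */
	mutex_unlock(&example_lock);
}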
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip)
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	mutex_set_owner(lock);
	if (!owner && (need_resched() || rt_task(task)))
		break;
	if (unlikely(signal_pending_state(state, task))) {
	mutex_set_owner(lock);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
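/*
 * Usage sketch (assumption, not from this file): when two mutexes of the
 * same lock class are taken in a fixed order, the inner acquisition is
 * annotated for lockdep with mutex_lock_nested().  example_lock_pair()
 * is an illustrative name.
 */
static void example_lock_pair(struct mutex *outer, struct mutex *inner)
{
	mutex_lock(outer);
	mutex_lock_nested(inner, SINGLE_DEPTH_NESTING);
	/* ... work on both objects ... */
	mutex_unlock(inner);
	mutex_unlock(outer);
}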
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
static __used noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
	__mutex_unlock_common_slowpath(lock_count, 1);
}
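/*
 * Hedged sketch, not part of this file: with the asm-generic
 * atomic-decrement style fastpath, unlocking is roughly one atomic
 * increment; if the count does not come back up to 1 there may be
 * waiters, and the slowpath above runs to wake one of them.
 * example_fastpath_unlock() is an illustrative name.
 */
static inline void
example_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	if (unlikely(atomic_inc_return(count) <= 0))
		fail_fn(count);
}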
#ifndef CONFIG_DEBUG_LOCK_ALLOC
__mutex_lock_killable_slowpath(atomic_t *lock_count);
__mutex_lock_interruptible_slowpath(atomic_t *lock_count);
	ret = __mutex_fastpath_lock_retval
			(&lock->count, __mutex_lock_interruptible_slowpath);
	if (!ret)
		mutex_set_owner(lock);
	ret = __mutex_fastpath_lock_retval
			(&lock->count, __mutex_lock_killable_slowpath);
	if (!ret)
		mutex_set_owner(lock);
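/*
 * Usage sketch (assumption, not from this file): callers of
 * mutex_lock_interruptible() must be prepared for -EINTR when a signal
 * arrives while waiting; mutex_lock_killable() behaves the same way but
 * only reacts to fatal signals.  example_do_work() is an illustrative
 * name.
 */
static int example_do_work(struct mutex *m)
{
	int ret;

	ret = mutex_lock_interruptible(m);
	if (ret)
		return ret;		/* typically -EINTR */
	/* ... critical section ... */
	mutex_unlock(m);
	return 0;
}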
__mutex_lock_slowpath(atomic_t *lock_count)
__mutex_lock_killable_slowpath(atomic_t *lock_count)
__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
		mutex_set_owner(lock);
		mutex_set_owner(lock);
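/*
 * Usage sketch (assumption, not from this file): mutex_trylock() returns
 * 1 if the lock was acquired and 0 if it is already held, and must not
 * be used from interrupt context.  example_try_update() is an
 * illustrative name.
 */
static int example_try_update(struct mutex *m, int *val)
{
	if (!mutex_trylock(m))
		return 0;	/* contended: caller falls back or retries later */
	(*val)++;
	mutex_unlock(m);
	return 1;
}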
	if (atomic_add_unless(cnt, -1, 1))
		return 0;	/* decremented without reaching zero, no lock needed */
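/*
 * Usage sketch (assumption, not from this file): the refcount-teardown
 * pattern served by atomic_dec_and_mutex_lock(); the mutex is returned
 * held only when the count really dropped to zero.  example_put() is an
 * illustrative name.
 */
static void example_put(atomic_t *refcount, struct mutex *m)
{
	if (!atomic_dec_and_mutex_lock(refcount, m))
		return;		/* other references remain */
	/* ... tear down the object while holding 'm' ... */
	mutex_unlock(m);
}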