Linux Kernel 3.7.1
arch/tile/include/asm/spinlock_32.h
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * 32-bit SMP spinlocks.
 */

#ifndef _ASM_TILE_SPINLOCK_32_H
#define _ASM_TILE_SPINLOCK_32_H

#include <linux/atomic.h>
#include <asm/page.h>
#include <linux/compiler.h>

/*
 * We only use even ticket numbers so the '1' inserted by a tns is
 * an unambiguous "ticket is busy" flag.
 */
#define TICKET_QUANTUM 2

/*
 * SMP ticket spinlocks, allowing only a single CPU anywhere
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
        /*
         * Note that even if a new ticket is in the process of being
         * acquired, so lock->next_ticket is 1, it's still reasonable
         * to claim the lock is held, since it will be momentarily
         * if not already.  There's no need to wait for a "valid"
         * lock->next_ticket to become available.
         */
        return lock->next_ticket != lock->current_ticket;
}

void arch_spin_lock(arch_spinlock_t *lock);

/* We cannot take an interrupt after getting a ticket, so don't enable them. */
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

int arch_spin_trylock(arch_spinlock_t *lock);

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        /* For efficiency, overlap fetching the old ticket with the wmb(). */
        int old_ticket = lock->current_ticket;
        wmb();  /* guarantee anything modified under the lock is visible */
        lock->current_ticket = old_ticket + TICKET_QUANTUM;
}

void arch_spin_unlock_wait(arch_spinlock_t *lock);
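
The acquire path is only declared here; the implementation lives elsewhere in the tile arch code and is built on the hardware tns instruction, which atomically stores 1 into a word and returns the old value. As a rough, hedged illustration of the even-ticket scheme described above (not the kernel code), here is a userspace sketch in plain C11 where atomic_exchange() stands in for tns; the toy_* names are hypothetical and exist only for this example.

/* Illustrative sketch only -- not the kernel implementation. */
#include <stdatomic.h>

#define TOY_TICKET_QUANTUM 2            /* mirrors TICKET_QUANTUM above */

struct toy_spinlock {
        _Atomic unsigned int next_ticket;
        _Atomic unsigned int current_ticket;
};

static void toy_spin_lock(struct toy_spinlock *lock)
{
        unsigned int my_ticket;

        /*
         * Atomically swap in 1 (the "busy" marker) and take the old value
         * as our ticket.  Real tickets advance by 2, so they are always
         * even; an odd value means another locker is mid-update, so retry.
         */
        while ((my_ticket = atomic_exchange(&lock->next_ticket, 1)) & 1)
                ;       /* spin: next_ticket is being updated */

        /* Publish the next even ticket, clearing the busy marker. */
        atomic_store(&lock->next_ticket, my_ticket + TOY_TICKET_QUANTUM);

        /* Wait until the current holder hands the lock to our ticket. */
        while (atomic_load(&lock->current_ticket) != my_ticket)
                ;       /* spin: not our turn yet */
}

static void toy_spin_unlock(struct toy_spinlock *lock)
{
        /* Release: advance current_ticket by one quantum, as in the header. */
        atomic_store(&lock->current_ticket,
                     atomic_load(&lock->current_ticket) + TOY_TICKET_QUANTUM);
}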

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * We use a "tns/store-back" technique on a single word to manage
 * the lock state, looping around to retry if the tns returns 1.
 */

/* Internal layout of the word; do not use. */
#define _WR_NEXT_SHIFT  8
#define _WR_CURR_SHIFT  16
#define _WR_WIDTH       8
#define _RD_COUNT_SHIFT 24
#define _RD_COUNT_WIDTH 8

static inline int arch_read_can_lock(arch_rwlock_t *rwlock)
{
        return (rwlock->lock << _RD_COUNT_WIDTH) == 0;
}

static inline int arch_write_can_lock(arch_rwlock_t *rwlock)
{
        return rwlock->lock == 0;
}
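
A hedged reading of the layout macros above (an interpretation, not text from the header): the top _RD_COUNT_WIDTH bits of rwlock->lock hold the reader count, the bytes at _WR_CURR_SHIFT and _WR_NEXT_SHIFT hold the writer's current and next tickets, and the remaining low bits are only nonzero transiently while a tns/store-back update of the word is in flight. The hypothetical toy_* helpers below just restate the two predicates in those terms.

/* Illustrative sketch only -- hypothetical helpers, not kernel code. */
#include <stdint.h>

#define TOY_WR_NEXT_SHIFT   8   /* mirrors _WR_NEXT_SHIFT */
#define TOY_WR_CURR_SHIFT   16  /* mirrors _WR_CURR_SHIFT */
#define TOY_WR_WIDTH        8   /* mirrors _WR_WIDTH */
#define TOY_RD_COUNT_SHIFT  24  /* mirrors _RD_COUNT_SHIFT */
#define TOY_RD_COUNT_WIDTH  8   /* mirrors _RD_COUNT_WIDTH */

/* Number of readers currently holding the lock (top byte). */
static inline uint32_t toy_rd_count(uint32_t word)
{
        return word >> TOY_RD_COUNT_SHIFT;
}

/* Writer ticket fields (middle two bytes). */
static inline uint32_t toy_wr_curr(uint32_t word)
{
        return (word >> TOY_WR_CURR_SHIFT) & ((1u << TOY_WR_WIDTH) - 1);
}

static inline uint32_t toy_wr_next(uint32_t word)
{
        return (word >> TOY_WR_NEXT_SHIFT) & ((1u << TOY_WR_WIDTH) - 1);
}

/*
 * arch_read_can_lock() shifts the word left by the reader-count width,
 * discarding the reader count: a reader may take the lock as long as
 * nothing else in the word (writer tickets, in-flight tns marker) is set.
 */
static inline int toy_read_can_lock(uint32_t word)
{
        return (word << TOY_RD_COUNT_WIDTH) == 0;
}

/* arch_write_can_lock() additionally requires zero readers. */
static inline int toy_write_can_lock(uint32_t word)
{
        return word == 0;
}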

void arch_read_lock(arch_rwlock_t *rwlock);

void arch_write_lock(arch_rwlock_t *rwlock);

int arch_read_trylock(arch_rwlock_t *rwlock);

int arch_write_trylock(arch_rwlock_t *rwlock);

void arch_read_unlock(arch_rwlock_t *rwlock);

void arch_write_unlock(arch_rwlock_t *rwlock);

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#endif /* _ASM_TILE_SPINLOCK_32_H */
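
Kernel code does not call these arch_* hooks directly; they sit underneath the generic locking layer in <linux/spinlock.h>, which reaches arch_spin_lock() via the arch_spin_lock_flags() macro defined above on an SMP tile build. A minimal hedged usage sketch (the lock and counter names are hypothetical examples, not from the source):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);   /* hypothetical lock */
static int example_count;               /* hypothetical data guarded by it */

static void example_bump(void)
{
        unsigned long flags;

        /* spin_lock_irqsave() ultimately lands in arch_spin_lock() here. */
        spin_lock_irqsave(&example_lock, flags);
        example_count++;
        spin_unlock_irqrestore(&example_lock, flags);
}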