#include <asm/barrier.h>
#include <arch/chip.h>
| #define | ATOMIC64_INIT(val)   { (val) } |
| #define | atomic64_add_negative(a, v)   (atomic64_add_return((a), (v)) < 0) |
| #define | atomic64_inc(v)   atomic64_add(1LL, (v)) |
| #define | atomic64_inc_return(v)   atomic64_add_return(1LL, (v)) |
| #define | atomic64_inc_and_test(v)   (atomic64_inc_return(v) == 0) |
| #define | atomic64_sub_return(i, v)   atomic64_add_return(-(i), (v)) |
| #define | atomic64_sub_and_test(a, v)   (atomic64_sub_return((a), (v)) == 0) |
| #define | atomic64_sub(i, v)   atomic64_add(-(i), (v)) |
| #define | atomic64_dec(v)   atomic64_sub(1LL, (v)) |
| #define | atomic64_dec_return(v)   atomic64_sub_return(1LL, (v)) |
| #define | atomic64_dec_and_test(v)   (atomic64_dec_return((v)) == 0) |
| #define | atomic64_inc_not_zero(v)   atomic64_add_unless((v), 1LL, 0LL) |
| #define | smp_mb__before_atomic_dec()   smp_mb() |
| #define | smp_mb__before_atomic_inc()   smp_mb() |
| #define | smp_mb__after_atomic_dec()   do { } while (0) |
| #define | smp_mb__after_atomic_inc()   do { } while (0) |
| #define | ATOMIC_LOCKS_FOUND_VIA_TABLE()   (!CHIP_HAS_CBOX_HOME_MAP() && defined(CONFIG_SMP)) |
| #define | ATOMIC_HASH_SHIFT   (PAGE_SHIFT - 3) |
| #define | ATOMIC_HASH_SIZE   (1 << ATOMIC_HASH_SHIFT) |
| #define | ATOMIC_LOCK_REG   20 |
| #define | ATOMIC_LOCK_REG_NAME   r20 |
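Most of the 64-bit helpers above are thin wrappers built from atomic64_add(), atomic64_add_return() and atomic64_add_unless(). A minimal sketch of how they compose in practice, using a hypothetical reference-counted object (the struct, field and function names here are illustrative, not part of this header):

```c
#include <linux/atomic.h>
#include <linux/slab.h>

/* Hypothetical object protected by a 64-bit reference count. */
struct foo {
	atomic64_t refcount;
};

/* Take a reference only if the object is still live (count != 0). */
static int foo_get(struct foo *f)
{
	return atomic64_inc_not_zero(&f->refcount);
}

/* Drop a reference; free the object when the count reaches zero. */
static void foo_put(struct foo *f)
{
	if (atomic64_dec_and_test(&f->refcount))
		kfree(f);
}
```

Note also that smp_mb__before_atomic_dec() and smp_mb__before_atomic_inc() expand to a full smp_mb() here, while the ..._after_... variants are no-ops, which suggests the atomic routines on this architecture already provide the trailing ordering themselves.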
|
| int | _atomic_xchg (atomic_t *v, int n) |
| int | _atomic_xchg_add (atomic_t *v, int i) |
| int | _atomic_xchg_add_unless (atomic_t *v, int a, int u) |
| int | _atomic_cmpxchg (atomic_t *v, int o, int n) |
| u64 | _atomic64_xchg (atomic64_t *v, u64 n) |
| u64 | _atomic64_xchg_add (atomic64_t *v, u64 i) |
| u64 | _atomic64_xchg_add_unless (atomic64_t *v, u64 a, u64 u) |
| u64 | _atomic64_cmpxchg (atomic64_t *v, u64 o, u64 n) |
| void | __init_atomic_per_cpu (void) |
| int * | __atomic_hashed_lock (volatile void *v) |
| struct __get_user | __atomic_cmpxchg (volatile int *p, int *lock, int o, int n) |
| struct __get_user | __atomic_xchg (volatile int *p, int *lock, int n) |
| struct __get_user | __atomic_xchg_add (volatile int *p, int *lock, int n) |
| struct __get_user | __atomic_xchg_add_unless (volatile int *p, int *lock, int o, int n) |
| struct __get_user | __atomic_or (volatile int *p, int *lock, int n) |
| struct __get_user | __atomic_andn (volatile int *p, int *lock, int n) |
| struct __get_user | __atomic_xor (volatile int *p, int *lock, int n) |
| u64 | __atomic64_cmpxchg (volatile u64 *p, int *lock, u64 o, u64 n) |
| u64 | __atomic64_xchg (volatile u64 *p, int *lock, u64 n) |
| u64 | __atomic64_xchg_add (volatile u64 *p, int *lock, u64 n) |
| u64 | __atomic64_xchg_add_unless (volatile u64 *p, int *lock, u64 o, u64 n) |
| struct __get_user | __atomic_bad_address (int __user *addr) |
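The double-underscore __atomic_* routines take an extra int *lock argument. On configurations where the locks are found via a table (see ATOMIC_LOCKS_FOUND_VIA_TABLE() and the ATOMIC_HASH_* constants above), each atomic word is guarded by a lock chosen by hashing its address, which is what __atomic_hashed_lock() returns. A rough, self-contained sketch of that hashing idea; the table layout and index calculation below are illustrative assumptions, not the kernel's exact implementation:

```c
#include <asm/page.h>	/* PAGE_SHIFT */

#define EXAMPLE_HASH_SHIFT	(PAGE_SHIFT - 3)
#define EXAMPLE_HASH_SIZE	(1 << EXAMPLE_HASH_SHIFT)

/* One lock word per hash bucket (illustrative only). */
static int example_atomic_locks[EXAMPLE_HASH_SIZE];

/* Map the address of an atomic variable to the lock word that guards it. */
static int *example_hashed_lock(volatile void *v)
{
	unsigned long addr = (unsigned long)v;

	/* Drop the low 3 bits (8-byte granularity), then mask into the table. */
	return &example_atomic_locks[(addr >> 3) & (EXAMPLE_HASH_SIZE - 1)];
}
```

The returned lock pointer is then passed as the lock argument to routines such as __atomic64_cmpxchg(), so all operations on the same word serialize on the same bucket.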