#include <linux/module.h>
#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
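/*
 * A block of lock words used by the hashed atomic operations.  The
 * per-cpu pool defined below gives each CPU its own instance of this
 * struct, so the lock words end up distributed across the CPUs.
 */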
struct atomic_locks_on_cpu {
	int lock[ATOMIC_HASH_L2_SIZE];
static DEFINE_PER_CPU(struct atomic_locks_on_cpu, atomic_lock_pool);
static struct atomic_locks_on_cpu __initdata initial_atomic_locks;
struct atomic_locks_on_cpu *atomic_lock_ptr[ATOMIC_HASH_L1_SIZE] = {
	[0 ... ATOMIC_HASH_L1_SIZE-1] = &initial_atomic_locks
};
#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
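	/*
	 * Two-level hash: CRC the address-derived value 'i', then use
	 * the top ATOMIC_HASH_L1_SHIFT bits to pick an entry in
	 * atomic_lock_ptr[] and the low bits to pick a lock word
	 * inside that per-cpu block.
	 */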
	unsigned long n = __insn_crc32_32(0, i);
	unsigned long l1_index = n >> ((sizeof(n) * 8) - ATOMIC_HASH_L1_SHIFT);
	unsigned long l2_index = n & (ATOMIC_HASH_L2_SIZE - 1);
	return &atomic_lock_ptr[l1_index]->lock[l2_index];
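	/*
	 * Non-table variant: build the lock pointer directly from the
	 * shifted address bits; __insn_mm is presumably the chip's
	 * masked-merge/bitfield instruction here.
	 */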
	unsigned long ptr = __insn_mm((unsigned long)v >> 1,
static int is_atomic_lock(int *p)
#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
	for (i = 0; i < ATOMIC_HASH_L1_SIZE; ++i) {
		if (p >= &atomic_lock_ptr[i]->lock[0] &&
		    p < &atomic_lock_ptr[i]->lock[ATOMIC_HASH_L2_SIZE]) {
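/*
 * Unlock helper used on the atomic fault path: the word handed in must
 * really be one of our lock words and must currently be held (i.e.
 * contain 1).
 */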
void __atomic_fault_unlock(int *irqlock_word)
	BUG_ON(!is_atomic_lock(irqlock_word));
	BUG_ON(*irqlock_word != 1);
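/*
 * Map the address of an atomic value to the lock word guarding it; the
 * wrappers below pass this lock, together with the value pointer, to
 * the low-level __atomic_*() helpers.
 */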
static inline int *__atomic_setup(volatile void *v)
	return __atomic_or((int *)p, __atomic_setup(p), mask).val;
	return __atomic_xor((int *)p, __atomic_setup(p), mask).val;
		panic("Bad address used for kernel atomic op: %p\n", addr);
#if CHIP_HAS_CBOX_HOME_MAP()
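/*
 * "noatomichash" is accepted as an early boot argument; the __setup()
 * call below registers this handler for it.
 */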
static int __init noatomichash(char *str)
__setup("noatomichash", noatomichash);
#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
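	/*
	 * Switch each atomic_lock_ptr[] slot from the single boot-time
	 * initial_atomic_locks block to a block from the per-cpu lock
	 * pool, spreading the locks across the CPUs selected via
	 * actual_cpu.
	 */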
	for (i = 0; i < ATOMIC_HASH_L1_SIZE; ++i) {
		atomic_lock_ptr[i] = &per_cpu(atomic_lock_pool, actual_cpu);