11 #include <asm/paravirt.h>
14 #include <xen/events.h>
19 #ifdef CONFIG_XEN_DEBUG_FS
/*
 * Statistics for the Xen PV spinlock slow path, exported via debugfs
 * (see xen_spinlock_debugfs() near the end of this file).
 *
 * NOTE(review): this chunk is an elided extraction -- the struct's opening
 * brace, several counter fields (original lines 21-23, 28-30, 32) and the
 * closing "} spinlock_stats;" are not visible here.
 */
20 static struct xen_spinlock_stats
/* Slow-path event counters, bumped through the ADD_STATS() macro below. */
24 u32 taken_slow_nested;
25 u32 taken_slow_pickup;
26 u32 taken_slow_spurious;
27 u32 taken_slow_irqenable;
31 u32 released_slow_kicked;
33 #define HISTO_BUCKETS 30
/*
 * Time histograms: HISTO_BUCKETS regular buckets plus one overflow bucket
 * at index HISTO_BUCKETS (see __spin_time_accum(), which increments
 * array[HISTO_BUCKETS] when the computed index is out of range).
 * The bucket-index computation itself is elided from this extraction.
 */
34 u32 histo_spin_total[HISTO_BUCKETS+1];
35 u32 histo_spin_spinning[HISTO_BUCKETS+1];
36 u32 histo_spin_blocked[HISTO_BUCKETS+1];
/*
 * Spin budget before falling back to the blocking slow path.
 * In this (debugfs) build TIMEOUT aliases a runtime-tunable variable;
 * the non-debug build below redefines TIMEOUT as the constant (1 << 10).
 * NOTE(review): units look like spin-loop iterations (it is decremented
 * by the xchgb spin loop in __xen_spin_lock) -- confirm against full file.
 */
45 static unsigned lock_timeout = 1 << 10;
46 #define TIMEOUT lock_timeout
/*
 * check_zero() - reset all statistics when a zeroing request is pending.
 * NOTE(review): elided extraction -- the function braces and the condition
 * guarding the memset (presumably a "zero_stats" flag, original lines
 * 49-50/52-54) are not visible here.
 */
48 static inline void check_zero(
void)
51 memset(&spinlock_stats, 0,
sizeof(spinlock_stats));
/*
 * ADD_STATS(elem, val) - add val to spinlock_stats.elem, honouring any
 * pending zeroing request first. Debugfs variant; the non-debug build
 * redefines this as a no-op that only evaluates val.
 */
56 #define ADD_STATS(elem, val) \
57 	do { check_zero(); spinlock_stats.elem += (val); } while(0)
/*
 * spin_time_start() - timestamp helper for the spin-time statistics.
 * NOTE(review): body elided (original lines 60-62); presumably returns a
 * cycle/ns counter -- confirm against the full file.
 */
59 static inline u64 spin_time_start(
void)
/*
 * __spin_time_accum() - account an elapsed delta into one of the
 * histogram arrays. The bucket-index computation (original lines 65-69)
 * is elided; what remains shows the overflow handling: in-range indices
 * bump their bucket, everything else lands in array[HISTO_BUCKETS].
 */
64 static void __spin_time_accum(
u64 delta,
u32 *array)
70 if (index < HISTO_BUCKETS)
73 array[HISTO_BUCKETS]++;
/*
 * Three accumulators, one per tracked phase (spinning / total / blocked).
 * Each histograms the delta since 'start' and adds it to a running total.
 * NOTE(review): the "delta = ... - start" computation and function braces
 * are elided from this extraction in all three functions.
 */
76 static inline void spin_time_accum_spinning(
u64 start)
80 __spin_time_accum(delta, spinlock_stats.histo_spin_spinning);
81 spinlock_stats.time_spinning +=
delta;
84 static inline void spin_time_accum_total(
u64 start)
88 __spin_time_accum(delta, spinlock_stats.histo_spin_total);
89 spinlock_stats.time_total +=
delta;
92 static inline void spin_time_accum_blocked(
u64 start)
96 __spin_time_accum(delta, spinlock_stats.histo_spin_blocked);
97 spinlock_stats.time_blocked +=
delta;
/*
 * !CONFIG_XEN_DEBUG_FS variants: statistics compile away to nothing.
 * NOTE(review): the "#else" introducing this branch (original line ~99)
 * and the empty stub bodies/#endif are elided from this extraction.
 */
100 #define TIMEOUT (1 << 10)
/* (void)(val) keeps the argument evaluated so side effects are preserved. */
101 #define ADD_STATS(elem, val) do { (void)(val); } while(0)
103 static inline u64 spin_time_start(
void)
108 static inline void spin_time_accum_total(
u64 start)
111 static inline void spin_time_accum_spinning(
u64 start)
114 static inline void spin_time_accum_blocked(
u64 start)
/*
 * Atomically adjust the per-lock waiter count (xl->spinners) with a
 * LOCK-prefixed RMW; the "memory" clobber orders it as a barrier.
 * Two variants exist: byte-sized (incb/decb) and word-sized (incw/decw).
 * NOTE(review): the #if/#else selecting between them (original lines
 * ~123/129, presumably on NR_CPUS fitting the counter width) and the
 * #endif are elided from this extraction -- confirm against full file.
 */
124 # define inc_spinners(xl) \
125 	asm(LOCK_PREFIX " incb %0" : "+m" ((xl)->spinners) : : "memory");
126 # define dec_spinners(xl) \
127 	asm(LOCK_PREFIX " decb %0" : "+m" ((xl)->spinners) : : "memory");
/* Word-sized variant of the same counters (other arm of the elided #if). */
130 # define inc_spinners(xl) \
131 	asm(LOCK_PREFIX " incw %0" : "+m" ((xl)->spinners) : : "memory");
132 # define dec_spinners(xl) \
133 	asm(LOCK_PREFIX " decw %0" : "+m" ((xl)->spinners) : : "memory");
/*
 * NOTE(review): fragments of two elided functions.
 * Original line 145: tail of a lock-state predicate (likely
 * xen_spin_is_locked) -- the lock is held iff xl->lock is non-zero.
 */
145 return xl->
lock != 0;
/*
 * Original line 163: constraint tail of an inline-asm exchange on
 * xl->lock (likely xen_spin_trylock); the asm template, inputs and the
 * enclosing function are all elided -- confirm against full file.
 */
163 :
"+q" (old),
"+m" (xl->
lock) : :
"memory");
/*
 * NOTE(review): interior fragment of the blocking slow path (likely
 * xen_spin_lock_slow); its signature, locals and the event-channel
 * poll/retry loop (original lines 213-266) are elided. What remains
 * shows the shape: time the blocked period, record this CPU as a
 * spinner (spinning_lock), retry the fast path, then undo the spinner
 * registration and account the blocked time on the way out.
 */
212 start = spin_time_start();
215 prev = spinning_lock(xl);
228 ret = xen_spin_trylock(lock);
267 unspinning_lock(xl, prev);
268 spin_time_accum_blocked(start);
/*
 * __xen_spin_lock() - common lock body for the plain and irq-enabling
 * entry points; irq_enable says whether the slow path may re-enable
 * interrupts while blocked.
 *
 * NOTE(review): heavily elided. The visible skeleton: time the whole
 * acquisition (start_spin), then per attempt time the fast spin
 * (start_spin_fast); the xchgb asm loop spins on xl->lock with a
 * bounded 'timeout' budget; on fast-path failure, fall into
 * xen_spin_lock_slow() unless TIMEOUT is ~0. Locals (xl, oldval,
 * timeout), the asm's remaining template/operands, and the surrounding
 * do/while are among the elided lines -- confirm against full file.
 */
273 static inline void __xen_spin_lock(
struct arch_spinlock *lock,
bool irq_enable)
282 start_spin = spin_time_start();
285 u64 start_spin_fast = spin_time_start();
289 asm(
"1: xchgb %1,%0\n"
298 :
"+m" (xl->
lock),
"=q" (oldval),
"+r" (timeout)
302 spin_time_accum_spinning(start_spin_fast);
305 (
TIMEOUT == ~0 || !xen_spin_lock_slow(lock, irq_enable))));
307 spin_time_accum_total(start_spin);
/*
 * NOTE(review): tail of xen_spin_lock (signature elided) -- delegates to
 * __xen_spin_lock with irq_enable=false, i.e. interrupts stay disabled
 * while blocked.
 */
312 __xen_spin_lock(lock,
false);
/*
 * xen_spin_lock_flags() - lock variant taking the caller's saved flags;
 * body elided (presumably passes irq_enable based on the flags --
 * confirm against full file).
 */
315 static void xen_spin_lock_flags(
struct arch_spinlock *lock,
unsigned long flags)
/*
 * NOTE(review): unlock slow-path fragments, enclosing functions elided.
 * Original line 328: inside a scan (likely over CPUs in
 * xen_spin_unlock_slow) looking for a CPU whose per-cpu lock_spinners
 * entry records it as waiting on this lock; the kick that follows is
 * elided.
 */
328 if (
per_cpu(lock_spinners, cpu) == xl) {
/*
 * Original line 354: call site in the unlock fast path (likely
 * xen_spin_unlock) that invokes the slow kick when waiters exist.
 */
354 xen_spin_unlock_slow(xl);
/*
 * NOTE(review): fragment of per-CPU lock setup (likely
 * xen_init_lock_cpu); the IPI/event-channel binding that produces 'irq'
 * (original lines up to 377) is elided. What remains records the kicker
 * irq for this CPU and logs the binding.
 */
378 per_cpu(lock_kicker_irq, cpu) = irq;
381 printk(
"cpu %d spinlock event irq %d\n", cpu, irq);
401 #ifdef CONFIG_XEN_DEBUG_FS
/* debugfs directory holding the spinlock statistics files. */
403 static struct dentry *d_spin_debug;
/*
 * xen_spinlock_debugfs() - publish spinlock_stats under debugfs.
 * NOTE(review): heavily elided -- the debugfs_create_* calls' first
 * arguments (file name, mode, parent dentry; e.g. original lines
 * 419/421/...), the directory creation, the return statement and the
 * closing brace/#endif are not visible here. Only the exported fields
 * remain: the u32 slow-path counters, the u64 time totals, and the
 * three HISTO_BUCKETS+1 histogram arrays.
 */
405 static int __init xen_spinlock_debugfs(
void)
420 &spinlock_stats.taken_slow);
422 &spinlock_stats.taken_slow_nested);
424 &spinlock_stats.taken_slow_pickup);
426 &spinlock_stats.taken_slow_spurious);
428 &spinlock_stats.taken_slow_irqenable);
432 &spinlock_stats.released_slow);
434 &spinlock_stats.released_slow_kicked);
437 &spinlock_stats.time_spinning);
439 &spinlock_stats.time_blocked);
441 &spinlock_stats.time_total);
444 spinlock_stats.histo_spin_total, HISTO_BUCKETS + 1);
446 spinlock_stats.histo_spin_spinning, HISTO_BUCKETS + 1);
448 spinlock_stats.histo_spin_blocked, HISTO_BUCKETS + 1);