#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/list.h>

#include <linux/hw_breakpoint.h>
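
/* Set by init_hw_breakpoint() once the constraint tables have been allocated */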
static int constraints_initialized;
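
/*
 * In max_task_bp_pinned(): report the maximum number of task-pinned
 * breakpoints of @type that a single task has pinned on @cpu.
 */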
	int i;
	unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);

	for (i = nr_slots[type] - 1; i >= 0; i--) {
		if (tsk_pinned[i] > 0)
			return i + 1;
	}
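
/*
 * In task_bp_pinned(): count the breakpoints of the same type and the same
 * target task as @bp; the event itself must not be on the list yet.
 */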
	if (iter->hw.bp_target == tsk &&
	    find_slot_idx(iter) == type &&
	    cpu == iter->cpu)
		count += hw_breakpoint_weight(iter);
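
/*
 * In fetch_bp_busy_slots(): report the number of pinned and flexible
 * breakpoints in a given cpu (bp->cpu >= 0) or, for an unbound event,
 * the worst case over all possible cpus.
 */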
	/* breakpoint bound to a single cpu */
	if (!tsk)
		slots->pinned += max_task_bp_pinned(cpu, type);
	else
		slots->pinned += task_bp_pinned(cpu, bp, type);

	/* unbound: take the maximum over for_each_possible_cpu(cpu) */
	nr = per_cpu(nr_cpu_bp_pinned[type], cpu);
	if (!tsk)
		nr += max_task_bp_pinned(cpu, type);
	else
		nr += task_bp_pinned(cpu, bp, type);

	if (nr > slots->pinned)
		slots->pinned = nr;

	nr = per_cpu(nr_bp_flexible[type], cpu);
	if (nr > slots->flexible)
		slots->flexible = nr;
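
/* Add or remove a pinned task breakpoint from the per-cpu constraint table */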
static void toggle_bp_task_slot(struct perf_event *bp, int cpu, bool enable,
				enum bp_type_idx type, int weight)
{
	unsigned int *tsk_pinned;
	int old_idx, old_count, idx;

	old_count = task_bp_pinned(cpu, bp, type);
	old_idx = old_count - 1;
	idx = old_idx + weight;

	/* tsk_pinned[n-1] is the number of tasks having n breakpoints */
	tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);
	if (enable) {
		tsk_pinned[idx]++;
		if (old_count > 0)
			tsk_pinned[old_idx]--;
	} else {
		tsk_pinned[idx]--;
		if (old_count > 0)
			tsk_pinned[old_idx]++;
	}
}
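
/*
 * In toggle_bp_slot(): for a task-bound breakpoint, update the per-task
 * slot counts on the one cpu the event is pinned to, or on every possible
 * cpu for an unbound event.
 */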
	if (cpu >= 0) {
		toggle_bp_task_slot(bp, cpu, enable, type, weight);
	} else {
		for_each_possible_cpu(cpu)
			toggle_bp_task_slot(bp, cpu, enable, type, weight);
	}
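
/* Check the constraints and, if a slot is free, account the new breakpoint */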
static int __reserve_bp_slot(struct perf_event *bp)
{
	struct bp_busy_slots slots = {0};
	enum bp_type_idx type;
	int weight;

	/* we couldn't initialize breakpoint constraints on boot */
	if (!constraints_initialized)
		return -ENOMEM;

	type = find_slot_idx(bp);
	weight = hw_breakpoint_weight(bp);
	fetch_bp_busy_slots(&slots, bp, type);
	fetch_this_slot(&slots, weight);

	/* flexible counters need to keep at least one slot */
	if (slots.pinned + (!!slots.flexible) > nr_slots[type])
		return -ENOSPC;

	toggle_bp_slot(bp, true, type, weight);
	return 0;
}
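
/* In reserve_bp_slot(): same as above, serialized by nr_bp_mutex */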
	ret = __reserve_bp_slot(bp);
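
/* Give back the slot(s) accounted by __reserve_bp_slot() */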
static void __release_bp_slot(struct perf_event *bp)
{
	enum bp_type_idx type;
	int weight;

	type = find_slot_idx(bp);
	weight = hw_breakpoint_weight(bp);
	toggle_bp_slot(bp, false, type, weight);
}
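
/* In release_bp_slot(): same as above, serialized by nr_bp_mutex */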
	__release_bp_slot(bp);
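
/*
 * dbg_reserve_bp_slot() and dbg_release_bp_slot(): lockless variants for
 * the kernel debugger; both bail out with -1 if nr_bp_mutex is held.
 */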
	if (mutex_is_locked(&nr_bp_mutex))	/* dbg_reserve_bp_slot() */
		return -1;
	return __reserve_bp_slot(bp);

	if (mutex_is_locked(&nr_bp_mutex))	/* dbg_release_bp_slot() */
		return -1;
	__release_bp_slot(bp);
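
/* In validate_hw_breakpoint(): arch-validate the settings and restrict kernel-space breakpoints */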
static int validate_hw_breakpoint(struct perf_event *bp)
{
	if (arch_check_bp_in_kernelspace(bp)) {
		if (bp->attr.exclude_kernel)
			return -EINVAL;
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
	}
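
/* In register_perf_hw_breakpoint(): reserve a slot, then validate the bp */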
	ret = validate_hw_breakpoint(bp);
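
/*
 * In modify_user_hw_breakpoint(): change the address, length or type of an
 * existing user breakpoint (e.g. from ptrace), restoring the old settings
 * if the new ones do not validate.
 */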
	u64 old_addr = bp->attr.bp_addr;
	u64 old_len = bp->attr.bp_len;
	int old_type = bp->attr.bp_type;
	int err = 0;

	bp->attr.bp_addr = attr->bp_addr;
	bp->attr.bp_type = attr->bp_type;
	bp->attr.bp_len = attr->bp_len;

	err = validate_hw_breakpoint(bp);
	if (!err)
		perf_event_enable(bp);

	if (err) {
		/* the new settings were rejected: roll back to the old ones */
		bp->attr.bp_addr = old_addr;
		bp->attr.bp_type = old_type;
		bp->attr.bp_len = old_len;
		if (!bp->attr.disabled)
			perf_event_enable(bp);

		return err;
	}
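
/* hw_breakpoint_exceptions_nb: die notifier for breakpoint exceptions */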
	.notifier_call	= hw_breakpoint_exceptions_notify,
	.priority	= 0x7fffffff	/* we need to be notified first */
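
/* PMU event_init callback for PERF_TYPE_BREAKPOINT events */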
static int hw_breakpoint_event_init(struct perf_event *bp)
{
	/* no branch sampling for breakpoint events */
	if (has_branch_stack(bp))
		return -EOPNOTSUPP;

	bp->destroy = bp_perf_event_destroy;
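
/* Remaining pmu callbacks for the breakpoint PMU */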
static void hw_breakpoint_del(struct perf_event *bp, int flags)
{
	arch_uninstall_hw_breakpoint(bp);
}

static void hw_breakpoint_start(struct perf_event *bp, int flags)
{
	bp->hw.state = 0;
}

static void hw_breakpoint_stop(struct perf_event *bp, int flags)
{
	bp->hw.state = PERF_HES_STOPPED;
}

static int hw_breakpoint_event_idx(struct perf_event *bp)
{
	return 0;
}
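
/* The breakpoint PMU, registered from init_hw_breakpoint() */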
static struct pmu perf_breakpoint = {
	.task_ctx_nr	= perf_sw_context, /* could eventually get its own */

	.event_init	= hw_breakpoint_event_init,
	.add		= hw_breakpoint_add,
	.del		= hw_breakpoint_del,
	.start		= hw_breakpoint_start,
	.stop		= hw_breakpoint_stop,
	.read		= hw_breakpoint_pmu_read,

	.event_idx	= hw_breakpoint_event_idx,
};
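
/* In init_hw_breakpoint(): allocate the per-cpu constraint tables at boot */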
	unsigned int **task_bp_pinned;
	int cpu, i;

	for_each_possible_cpu(cpu) {
		for (i = 0; i < TYPE_MAX; i++) {
			task_bp_pinned = &per_cpu(nr_task_bp_pinned[i], cpu);
			*task_bp_pinned = kzalloc(sizeof(int) * nr_slots[i],
						  GFP_KERNEL);
			if (!*task_bp_pinned)
				goto err_alloc;
		}
	}

	constraints_initialized = 1;
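
/*
 * Usage sketch (illustrative only, not part of this file; the symbol name
 * and the wp_handler callback are made up): kernel code normally consumes
 * the slot accounting above through the hw_breakpoint API rather than the
 * PMU directly, roughly along these lines:
 *
 *	struct perf_event * __percpu *wp;
 *	struct perf_event_attr attr;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = kallsyms_lookup_name("some_symbol");
 *	attr.bp_len  = HW_BREAKPOINT_LEN_4;
 *	attr.bp_type = HW_BREAKPOINT_W;
 *	wp = register_wide_hw_breakpoint(&attr, wp_handler, NULL);
 */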