22 #include <linux/module.h>
23 #include <linux/kernel.h>
24 #include <linux/sched.h>
31 #include <linux/sysctl.h>
32 #include <linux/list.h>
34 #include <linux/poll.h>
39 #include <linux/bitops.h>
40 #include <linux/capability.h>
44 #include <linux/slab.h>
46 #include <asm/errno.h>
47 #include <asm/intrinsics.h>
49 #include <asm/perfmon.h>
50 #include <asm/processor.h>
51 #include <asm/signal.h>
52 #include <asm/uaccess.h>
53 #include <asm/delay.h>
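/*
 * Context states: a context starts UNLOADED, becomes LOADED when attached to
 * a task or CPU, is MASKED while monitoring is suspended after an overflow
 * notification, and turns ZOMBIE when its owner exits with the context still
 * attached.
 */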
59 #define PFM_CTX_UNLOADED 1
60 #define PFM_CTX_LOADED 2
61 #define PFM_CTX_MASKED 3
62 #define PFM_CTX_ZOMBIE 4
64 #define PFM_INVALID_ACTIVATION (~0UL)
66 #define PFM_NUM_PMC_REGS 64
67 #define PFM_NUM_PMD_REGS 64
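/* notification messages are kept in a small circular queue inside the context */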
72 #define PFM_MAX_MSGS 32
73 #define PFM_CTXQ_EMPTY(g) ((g)->ctx_msgq_head == (g)->ctx_msgq_tail)
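/*
 * Type bits used in the pmu_conf PMC/PMD descriptor tables; PFM_REG_END
 * marks the last implemented entry of a table.
 */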
86 #define PFM_REG_NOTIMPL 0x0
87 #define PFM_REG_IMPL 0x1
88 #define PFM_REG_END 0x2
89 #define PFM_REG_MONITOR (0x1<<4|PFM_REG_IMPL)
90 #define PFM_REG_COUNTING (0x2<<4|PFM_REG_MONITOR)
91 #define PFM_REG_CONTROL (0x4<<4|PFM_REG_IMPL)
92 #define PFM_REG_CONFIG (0x8<<4|PFM_REG_IMPL)
93 #define PFM_REG_BUFFER (0xc<<4|PFM_REG_IMPL)
95 #define PMC_IS_LAST(i) (pmu_conf->pmc_desc[i].type & PFM_REG_END)
96 #define PMD_IS_LAST(i) (pmu_conf->pmd_desc[i].type & PFM_REG_END)
98 #define PMC_OVFL_NOTIFY(ctx, i) ((ctx)->ctx_pmds[i].flags & PFM_REGFL_OVFL_NOTIFY)
101 #define PMC_IS_IMPL(i) (i< PMU_MAX_PMCS && (pmu_conf->pmc_desc[i].type & PFM_REG_IMPL))
102 #define PMD_IS_IMPL(i) (i< PMU_MAX_PMDS && (pmu_conf->pmd_desc[i].type & PFM_REG_IMPL))
105 #define PMD_IS_COUNTING(i) ((pmu_conf->pmd_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
106 #define PMC_IS_COUNTING(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
107 #define PMC_IS_MONITOR(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_MONITOR) == PFM_REG_MONITOR)
108 #define PMC_IS_CONTROL(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_CONTROL) == PFM_REG_CONTROL)
110 #define PMC_DFL_VAL(i) pmu_conf->pmc_desc[i].default_value
111 #define PMC_RSVD_MASK(i) pmu_conf->pmc_desc[i].reserved_mask
112 #define PMD_PMD_DEP(i) pmu_conf->pmd_desc[i].dep_pmd[0]
113 #define PMC_PMD_DEP(i) pmu_conf->pmc_desc[i].dep_pmd[0]
115 #define PFM_NUM_IBRS IA64_NUM_DBG_REGS
116 #define PFM_NUM_DBRS IA64_NUM_DBG_REGS
118 #define CTX_OVFL_NOBLOCK(c) ((c)->ctx_fl_block == 0)
119 #define CTX_HAS_SMPL(c) ((c)->ctx_fl_is_sampling)
120 #define PFM_CTX_TASK(h) (h)->ctx_task
125 #define CTX_USED_PMD(ctx, mask) (ctx)->ctx_used_pmds[0] |= (mask)
126 #define CTX_IS_USED_PMD(ctx, c) (((ctx)->ctx_used_pmds[0] & (1UL << (c))) != 0UL)
128 #define CTX_USED_MONITOR(ctx, mask) (ctx)->ctx_used_monitors[0] |= (mask)
130 #define CTX_USED_IBR(ctx,n) (ctx)->ctx_used_ibrs[(n)>>6] |= 1UL<< ((n) % 64)
131 #define CTX_USED_DBR(ctx,n) (ctx)->ctx_used_dbrs[(n)>>6] |= 1UL<< ((n) % 64)
132 #define CTX_USES_DBREGS(ctx) (((pfm_context_t *)(ctx))->ctx_fl_using_dbreg==1)
133 #define PFM_CODE_RR 0
134 #define PFM_DATA_RR 1
136 #define PFM_CPUINFO_CLEAR(v) pfm_get_cpu_var(pfm_syst_info) &= ~(v)
137 #define PFM_CPUINFO_SET(v) pfm_get_cpu_var(pfm_syst_info) |= (v)
138 #define PFM_CPUINFO_GET() pfm_get_cpu_var(pfm_syst_info)
140 #define RDEP(x) (1UL<<(x))
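/*
 * Context locking: PROTECT_CTX/UNPROTECT_CTX take ctx_lock with interrupts
 * disabled; the _NOPRINT variants skip the DPRINT tracing and the _NOIRQ
 * variants are for callers that already run with interrupts off.
 */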
160 #define PROTECT_CTX(c, f) \
162 DPRINT(("spinlock_irq_save ctx %p by [%d]\n", c, task_pid_nr(current))); \
163 spin_lock_irqsave(&(c)->ctx_lock, f); \
164 DPRINT(("spinlocked ctx %p by [%d]\n", c, task_pid_nr(current))); \
167 #define UNPROTECT_CTX(c, f) \
169 DPRINT(("spinlock_irq_restore ctx %p by [%d]\n", c, task_pid_nr(current))); \
170 spin_unlock_irqrestore(&(c)->ctx_lock, f); \
173 #define PROTECT_CTX_NOPRINT(c, f) \
175 spin_lock_irqsave(&(c)->ctx_lock, f); \
179 #define UNPROTECT_CTX_NOPRINT(c, f) \
181 spin_unlock_irqrestore(&(c)->ctx_lock, f); \
185 #define PROTECT_CTX_NOIRQ(c) \
187 spin_lock(&(c)->ctx_lock); \
190 #define UNPROTECT_CTX_NOIRQ(c) \
192 spin_unlock(&(c)->ctx_lock); \
198 #define GET_ACTIVATION() pfm_get_cpu_var(pmu_activation_number)
199 #define INC_ACTIVATION() pfm_get_cpu_var(pmu_activation_number)++
200 #define SET_ACTIVATION(c) (c)->ctx_last_activation = GET_ACTIVATION()
203 #define SET_ACTIVATION(t) do {} while(0)
204 #define GET_ACTIVATION(t) do {} while(0)
205 #define INC_ACTIVATION(t) do {} while(0)
208 #define SET_PMU_OWNER(t, c) do { pfm_get_cpu_var(pmu_owner) = (t); pfm_get_cpu_var(pmu_ctx) = (c); } while(0)
209 #define GET_PMU_OWNER() pfm_get_cpu_var(pmu_owner)
210 #define GET_PMU_CTX() pfm_get_cpu_var(pmu_ctx)
212 #define LOCK_PFS(g) spin_lock_irqsave(&pfm_sessions.pfs_lock, g)
213 #define UNLOCK_PFS(g) spin_unlock_irqrestore(&pfm_sessions.pfs_lock, g)
215 #define PFM_REG_RETFLAG_SET(flags, val) do { flags &= ~PFM_REG_RETFL_MASK; flags |= (val); } while(0)
220 #define PMC0_HAS_OVFL(cmp0) (cmp0 & ~0x1UL)
222 #define PFMFS_MAGIC 0xa0b4d889
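/* DPRINT/DPRINT_ovfl only emit output when the corresponding sysctl debug flags are set */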
227 #define PFM_DEBUGGING 1
231 if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d [%d] ", __func__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
234 #define DPRINT_ovfl(a) \
236 if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d [%d] ", __func__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
248 unsigned long long_reset;
249 unsigned long short_reset;
250 unsigned long reset_pmds[4];
251 unsigned long smpl_pmds[4];
255 unsigned long eventid;
262 unsigned int block:1;
264 unsigned int using_dbreg:1;
265 unsigned int is_sampling:1;
266 unsigned int excl_idle:1;
267 unsigned int going_zombie:1;
269 unsigned int no_msg:1;
270 unsigned int can_restart:1;
272 } pfm_context_flags_t;
274 #define PFM_TRAP_REASON_NONE 0x0
275 #define PFM_TRAP_REASON_BLOCK 0x1
276 #define PFM_TRAP_REASON_RESET 0x2
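/*
 * Per-context state: software copies of the PMC/PMD registers, bitmasks of
 * registers in use, the optional sampling buffer and the notification message
 * queue served through the controlling file descriptor.
 */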
283 typedef struct pfm_context {
286 pfm_context_flags_t ctx_flags;
287 unsigned int ctx_state;
291 unsigned long ctx_ovfl_regs[4];
295 unsigned long ctx_used_pmds[4];
296 unsigned long ctx_all_pmds[4];
297 unsigned long ctx_reload_pmds[4];
299 unsigned long ctx_all_pmcs[4];
300 unsigned long ctx_reload_pmcs[4];
301 unsigned long ctx_used_monitors[4];
303 unsigned long ctx_pmcs[PFM_NUM_PMC_REGS];
305 unsigned int ctx_used_ibrs[1];
306 unsigned int ctx_used_dbrs[1];
310 pfm_counter_t ctx_pmds[PFM_NUM_PMD_REGS];
312 unsigned long th_pmcs[PFM_NUM_PMC_REGS];
313 unsigned long th_pmds[PFM_NUM_PMD_REGS];
315 unsigned long ctx_saved_psr_up;
317 unsigned long ctx_last_activation;
318 unsigned int ctx_last_cpu;
319 unsigned int ctx_cpu;
326 unsigned long ctx_smpl_size;
327 void *ctx_smpl_vaddr;
342 #define PFM_IS_FILE(f) ((f)->f_op == &pfm_file_ops)
344 #define PFM_GET_CTX(t) ((pfm_context_t *)(t)->thread.pfm_context)
347 #define SET_LAST_CPU(ctx, v) (ctx)->ctx_last_cpu = (v)
348 #define GET_LAST_CPU(ctx) (ctx)->ctx_last_cpu
350 #define SET_LAST_CPU(ctx, v) do {} while(0)
351 #define GET_LAST_CPU(ctx) do {} while(0)
355 #define ctx_fl_block ctx_flags.block
356 #define ctx_fl_system ctx_flags.system
357 #define ctx_fl_using_dbreg ctx_flags.using_dbreg
358 #define ctx_fl_is_sampling ctx_flags.is_sampling
359 #define ctx_fl_excl_idle ctx_flags.excl_idle
360 #define ctx_fl_going_zombie ctx_flags.going_zombie
361 #define ctx_fl_trap_reason ctx_flags.trap_reason
362 #define ctx_fl_no_msg ctx_flags.no_msg
363 #define ctx_fl_can_restart ctx_flags.can_restart
365 #define PFM_SET_WORK_PENDING(t, v) do { (t)->thread.pfm_needs_checking = v; } while(0);
366 #define PFM_GET_WORK_PENDING(t) (t)->thread.pfm_needs_checking
375 unsigned int pfs_task_sessions;
376 unsigned int pfs_sys_sessions;
377 unsigned int pfs_sys_use_dbregs;
378 unsigned int pfs_ptrace_use_dbregs;
392 unsigned long reserved_mask;
393 pfm_reg_check_t read_check;
394 pfm_reg_check_t write_check;
395 unsigned long dep_pmd[4];
396 unsigned long dep_pmc[4];
400 #define PMC_PM(cnum, val) (((val) >> (pmu_conf->pmc_desc[cnum].pm_pos)) & 0x1)
415 unsigned long ovfl_val;
417 pfm_reg_desc_t *pmc_desc;
418 pfm_reg_desc_t *pmd_desc;
420 unsigned int num_pmcs;
421 unsigned int num_pmds;
422 unsigned long impl_pmcs[4];
423 unsigned long impl_pmds[4];
426 unsigned int pmu_family;
428 unsigned int num_ibrs;
429 unsigned int num_dbrs;
430 unsigned int num_counters;
432 unsigned int use_rr_dbregs:1;
437 #define PFM_PMU_IRQ_RESEND 1
443 unsigned long ibr_mask:56;
444 unsigned long ibr_plm:4;
445 unsigned long ibr_ig:3;
446 unsigned long ibr_x:1;
450 unsigned long dbr_mask:56;
451 unsigned long dbr_plm:4;
452 unsigned long dbr_ig:2;
453 unsigned long dbr_w:1;
454 unsigned long dbr_r:1;
471 unsigned int cmd_narg;
473 int (*cmd_getsize)(void *arg, size_t *sz);
476 #define PFM_CMD_FD 0x01
477 #define PFM_CMD_ARG_READ 0x02
478 #define PFM_CMD_ARG_RW 0x04
479 #define PFM_CMD_STOP 0x08
482 #define PFM_CMD_NAME(cmd) pfm_cmd_tab[(cmd)].cmd_name
483 #define PFM_CMD_READ_ARG(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_READ)
484 #define PFM_CMD_RW_ARG(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_RW)
485 #define PFM_CMD_USE_FD(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_FD)
486 #define PFM_CMD_STOPPED(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_STOP)
488 #define PFM_CMD_ARG_MANY -1
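/* per-CPU overflow-interrupt and sampling-handler statistics */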
491 unsigned long pfm_spurious_ovfl_intr_count;
492 unsigned long pfm_replay_ovfl_intr_count;
493 unsigned long pfm_ovfl_intr_count;
494 unsigned long pfm_ovfl_intr_cycles;
495 unsigned long pfm_ovfl_intr_cycles_min;
496 unsigned long pfm_ovfl_intr_cycles_max;
497 unsigned long pfm_smpl_handler_calls;
498 unsigned long pfm_smpl_handler_cycles;
505 static pfm_stats_t pfm_stats[NR_CPUS];
506 static pfm_session_t pfm_sessions;
517 static pmu_config_t *pmu_conf;
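/* perfmon sysctl knobs: debug, debug_ovfl, fastctxsw and expert_mode */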
526 .data = &pfm_sysctl.debug,
527 .maxlen = sizeof(int),
532 .procname = "debug_ovfl",
534 .maxlen = sizeof(int),
539 .procname = "fastctxsw",
541 .maxlen = sizeof(int),
546 .procname = "expert_mode",
548 .maxlen = sizeof(int),
558 .child = pfm_ctl_table,
566 .child = pfm_sysctl_dir,
574 #define pfm_get_cpu_var(v) __ia64_per_cpu_var(v)
575 #define pfm_get_cpu_data(a,b) per_cpu(a, b)
580 if (task != current) put_task_struct(task);
584 pfm_reserve_page(unsigned long a)
589 pfm_unreserve_page(unsigned long a)
594 static inline unsigned long
595 pfm_protect_ctx_ctxsw(pfm_context_t *x)
597 spin_lock(&(x)->ctx_lock);
602 pfm_unprotect_ctx_ctxsw(pfm_context_t *x, unsigned long f)
604 spin_unlock(&(x)->ctx_lock);
619 .mount = pfmfs_mount,
640 void dump_pmu_state(const char *);
648 static pmu_config_t *pmu_confs[]={
657 static int pfm_end_notify_user(pfm_context_t *ctx);
660 pfm_clear_psr_pp(void)
674 pfm_clear_psr_up(void)
687 static inline unsigned long
697 pfm_set_psr_l(unsigned long val)
711 pfm_unfreeze_pmu(void)
718 pfm_restore_ibrs(unsigned long *ibrs, unsigned int nibrs)
722 for (i=0; i < nibrs; i++) {
730 pfm_restore_dbrs(unsigned long *dbrs, unsigned int ndbrs)
734 for (i=0; i < ndbrs; i++) {
735 ia64_set_dbr(i, dbrs[i]);
744 static inline unsigned long
745 pfm_read_soft_counter(pfm_context_t *ctx, int i)
747 return ctx->ctx_pmds[i].val + (ia64_get_pmd(i) & pmu_conf->ovfl_val);
754 pfm_write_soft_counter(pfm_context_t *ctx, int i, unsigned long val)
756 unsigned long ovfl_val = pmu_conf->ovfl_val;
758 ctx->ctx_pmds[i].val = val & ~ovfl_val;
767 pfm_get_new_msg(pfm_context_t *ctx)
771 next = (ctx->ctx_msgq_tail+1) % PFM_MAX_MSGS;
773 DPRINT(("ctx_fd=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
774 if (next == ctx->ctx_msgq_head) return NULL;
776 idx = ctx->ctx_msgq_tail;
777 ctx->ctx_msgq_tail = next;
779 DPRINT(("ctx=%p head=%d tail=%d msg=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, idx));
781 return ctx->ctx_msgq+idx;
785 pfm_get_next_msg(pfm_context_t *ctx)
789 DPRINT(("ctx=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
791 if (PFM_CTXQ_EMPTY(ctx)) return NULL;
796 msg = ctx->ctx_msgq+ctx->ctx_msgq_head;
801 ctx->ctx_msgq_head = (ctx->ctx_msgq_head+1) % PFM_MAX_MSGS;
809 pfm_reset_msgq(pfm_context_t *ctx)
811 ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
812 DPRINT(("ctx=%p msgq reset\n", ctx));
816 pfm_rvmalloc(unsigned long size)
825 addr = (unsigned long)mem;
827 pfm_reserve_page(addr);
836 pfm_rvfree(void *mem, unsigned long size)
841 DPRINT(("freeing physical buffer @%p size=%lu\n", mem, size));
842 addr = (unsigned long) mem;
843 while ((long) size > 0) {
844 pfm_unreserve_page(addr);
853 static pfm_context_t *
854 pfm_context_alloc(int ctx_flags)
862 ctx = kzalloc(sizeof(pfm_context_t), GFP_KERNEL);
864 DPRINT(("alloc ctx @%p\n", ctx));
874 ctx->ctx_state = PFM_CTX_UNLOADED;
890 init_completion(&ctx->ctx_restart_done);
895 ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
896 SET_LAST_CPU(ctx, -1);
901 ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
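/* free a context previously obtained from pfm_context_alloc() */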
910 pfm_context_free(pfm_context_t *ctx)
913 DPRINT(("free ctx @%p\n", ctx));
921 pfm_context_t *ctx = PFM_GET_CTX(task);
922 unsigned long mask, val, ovfl_mask;
925 DPRINT_ovfl(("masking monitoring for [%d]\n", task_pid_nr(task)));
927 ovfl_mask = pmu_conf->ovfl_val;
947 mask = ctx->ctx_used_pmds[0];
948 for (i = 0; mask; i++, mask>>=1) {
950 if ((mask & 0x1) == 0) continue;
953 if (PMD_IS_COUNTING(i)) {
957 ctx->ctx_pmds[i].val += (val & ovfl_mask);
959 ctx->ctx_pmds[i].val = val;
963 ctx->ctx_pmds[i].val,
976 if ((mask & 0x1) == 0UL) continue;
978 ctx->th_pmcs[i] &= ~0xfUL;
979 DPRINT_ovfl(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i]));
995 pfm_context_t *ctx = PFM_GET_CTX(task);
996 unsigned long mask, ovfl_mask;
1000 is_system = ctx->ctx_fl_system;
1001 ovfl_mask = pmu_conf->ovfl_val;
1004 printk(KERN_ERR "perfmon.%d: invalid task[%d] current[%d]\n", __LINE__, task_pid_nr(task), task_pid_nr(current));
1007 if (ctx->ctx_state != PFM_CTX_MASKED) {
1008 printk(KERN_ERR "perfmon.%d: task[%d] current[%d] invalid state=%d\n", __LINE__,
1009 task_pid_nr(task), task_pid_nr(current), ctx->ctx_state);
1012 psr = pfm_get_psr();
1033 mask = ctx->ctx_used_pmds[0];
1034 for (i = 0; mask; i++, mask>>=1) {
1036 if ((mask & 0x1) == 0) continue;
1038 if (PMD_IS_COUNTING(i)) {
1043 val = ctx->ctx_pmds[i].val & ovfl_mask;
1044 ctx->ctx_pmds[i].val &= ~ovfl_mask;
1046 val = ctx->ctx_pmds[i].val;
1050 DPRINT(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
1052 ctx->ctx_pmds[i].val,
1060 if ((mask & 0x1) == 0UL) continue;
1061 ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
1063 DPRINT(("[%d] pmc[%d]=0x%lx\n",
1064 task_pid_nr(task), i, ctx->th_pmcs[i]));
1072 if (ctx->ctx_fl_using_dbreg) {
1073 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
1074 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
1089 pfm_save_pmds(unsigned long *pmds, unsigned long mask)
1095 for (i=0; mask; i++, mask>>=1) {
1104 pfm_restore_pmds(unsigned long *pmds, unsigned long mask)
1107 unsigned long val, ovfl_val = pmu_conf->ovfl_val;
1109 for (i=0; mask; i++, mask>>=1) {
1110 if ((mask & 0x1) == 0) continue;
1111 val = PMD_IS_COUNTING(i) ? pmds[i] & ovfl_val : pmds[i];
1121 pfm_copy_pmds(struct task_struct *task, pfm_context_t *ctx)
1123 unsigned long ovfl_val = pmu_conf->ovfl_val;
1124 unsigned long mask = ctx->ctx_all_pmds[0];
1128 DPRINT(("mask=0x%lx\n", mask));
1130 for (i=0; mask; i++, mask>>=1) {
1132 val = ctx->ctx_pmds[i].val;
1140 if (PMD_IS_COUNTING(i)) {
1141 ctx->ctx_pmds[i].val = val & ~ovfl_val;
1144 ctx->th_pmds[i] = val;
1146 DPRINT(("pmd[%d]=0x%lx soft_val=0x%lx\n",
1149 ctx->ctx_pmds[i].val));
1157 pfm_copy_pmcs(struct task_struct *task, pfm_context_t *ctx)
1159 unsigned long mask = ctx->ctx_all_pmcs[0];
1162 DPRINT(("mask=0x%lx\n", mask));
1164 for (i=0; mask; i++, mask>>=1) {
1166 ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
1167 DPRINT(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i]));
1174 pfm_restore_pmcs(unsigned long *pmcs, unsigned long mask)
1178 for (i=0; mask; i++, mask>>=1) {
1179 if ((mask & 0x1) == 0) continue;
1250 if (pfm_uuid_cmp(uuid, entry->fmt_uuid) == 0)
1263 spin_lock(&pfm_buffer_fmt_lock);
1264 fmt = __pfm_find_buffer_fmt(uuid);
1265 spin_unlock(&pfm_buffer_fmt_lock);
1284 spin_lock(&pfm_buffer_fmt_lock);
1286 if (__pfm_find_buffer_fmt(fmt->fmt_uuid)) {
1291 list_add(&fmt->fmt_list, &pfm_buffer_fmt_list);
1295 spin_unlock(&pfm_buffer_fmt_lock);
1306 spin_lock(&pfm_buffer_fmt_lock);
1308 fmt = __pfm_find_buffer_fmt(uuid);
1310 printk(KERN_ERR "perfmon: cannot unregister format, not found\n");
1318 spin_unlock(&pfm_buffer_fmt_lock);
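/*
 * Session accounting: one system-wide session slot exists per CPU, and
 * system-wide sessions are refused while per-task sessions exist (and
 * vice versa).
 */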
1327 pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
1329 unsigned long flags;
1335 DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
1336 pfm_sessions.pfs_sys_sessions,
1337 pfm_sessions.pfs_task_sessions,
1338 pfm_sessions.pfs_sys_use_dbregs,
1346 if (pfm_sessions.pfs_task_sessions > 0UL) {
1347 DPRINT(("system wide not possible, %u conflicting task_sessions\n",
1348 pfm_sessions.pfs_task_sessions));
1352 if (pfm_sessions.pfs_sys_session[cpu]) goto error_conflict;
1356 pfm_sessions.pfs_sys_session[cpu] = task;
1358 pfm_sessions.pfs_sys_sessions++ ;
1361 if (pfm_sessions.pfs_sys_sessions) goto abort;
1362 pfm_sessions.pfs_task_sessions++;
1365 DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
1366 pfm_sessions.pfs_sys_sessions,
1367 pfm_sessions.pfs_task_sessions,
1368 pfm_sessions.pfs_sys_use_dbregs,
1382 DPRINT(("system wide not possible, conflicting session [%d] on CPU%d\n",
1383 task_pid_nr(pfm_sessions.pfs_sys_session[cpu]),
1393 pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
1395 unsigned long flags;
1401 DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
1402 pfm_sessions.pfs_sys_sessions,
1403 pfm_sessions.pfs_task_sessions,
1404 pfm_sessions.pfs_sys_use_dbregs,
1410 pfm_sessions.pfs_sys_session[cpu] = NULL;
1414 if (ctx && ctx->ctx_fl_using_dbreg) {
1415 if (pfm_sessions.pfs_sys_use_dbregs == 0) {
1416 printk(KERN_ERR "perfmon: invalid release for ctx %p sys_use_dbregs=0\n", ctx);
1418 pfm_sessions.pfs_sys_use_dbregs--;
1421 pfm_sessions.pfs_sys_sessions--;
1423 pfm_sessions.pfs_task_sessions--;
1425 DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
1426 pfm_sessions.pfs_sys_sessions,
1427 pfm_sessions.pfs_task_sessions,
1428 pfm_sessions.pfs_sys_use_dbregs,
1435 if (pfm_sessions.pfs_task_sessions == 0 && pfm_sessions.pfs_sys_sessions == 0)
1449 pfm_remove_smpl_mapping(void *vaddr, unsigned long size)
1455 if (task->mm == NULL || size == 0UL || vaddr == NULL) {
1456 printk(KERN_ERR "perfmon: pfm_remove_smpl_mapping [%d] invalid context mm=%p\n", task_pid_nr(task), task->mm);
1460 DPRINT(("smpl_vaddr=%p size=%lu\n", vaddr, size));
1465 r = vm_munmap((unsigned long)vaddr, size);
1468 printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task_pid_nr(task), vaddr, size);
1471 DPRINT(("do_unmap(%p, %lu)=%d\n", vaddr, size, r));
1481 pfm_free_smpl_buffer(pfm_context_t *ctx)
1485 if (ctx->ctx_smpl_hdr == NULL) goto invalid_free;
1490 fmt = ctx->ctx_buf_fmt;
1492 DPRINT(("sampling buffer @%p size %lu vaddr=%p\n",
1495 ctx->ctx_smpl_vaddr));
1502 pfm_rvfree(ctx->ctx_smpl_hdr, ctx->ctx_smpl_size);
1504 ctx->ctx_smpl_hdr = NULL;
1505 ctx->ctx_smpl_size = 0UL;
1518 if (fmt == NULL) return;
1538 err = PTR_ERR(pfmfs_mnt);
1539 if (IS_ERR(pfmfs_mnt))
1548 pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
1553 unsigned long flags;
1555 if (PFM_IS_FILE(filp) == 0) {
1570 DPRINT(("message is too small ctx=%p (>=%ld)\n", ctx, sizeof(pfm_msg_t)));
1574 PROTECT_CTX(ctx, flags);
1589 DPRINT(("head=%d tail=%d\n", ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
1592 if(PFM_CTXQ_EMPTY(ctx) == 0) break;
1594 UNPROTECT_CTX(ctx, flags);
1614 PROTECT_CTX(ctx, flags);
1616 DPRINT(("[%d] back to running ret=%ld\n", task_pid_nr(current), ret));
1620 if (ret < 0) goto abort;
1623 msg = pfm_get_next_msg(ctx);
1635 UNPROTECT_CTX(ctx, flags);
1641 pfm_write(struct file *file, const char __user *ubuf,
1642 size_t size, loff_t *ppos)
1644 DPRINT(("pfm_write called\n"));
1652 unsigned long flags;
1653 unsigned int mask = 0;
1655 if (PFM_IS_FILE(filp) == 0) {
1667 DPRINT(("pfm_poll ctx_fd=%d before poll_wait\n", ctx->ctx_fd));
1669 poll_wait(filp, &ctx->ctx_msgq_wait, wait);
1671 PROTECT_CTX(ctx, flags);
1673 if (PFM_CTXQ_EMPTY(ctx) == 0)
1676 UNPROTECT_CTX(ctx, flags);
1678 DPRINT(("pfm_poll ctx_fd=%d mask=0x%x\n", ctx->ctx_fd, mask));
1684 pfm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1686 DPRINT(("pfm_ioctl called\n"));
1694 pfm_do_fasync(int fd, struct file *filp, pfm_context_t *ctx, int on)
1700 DPRINT(("pfm_fasync called by [%d] on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
1704 ctx->ctx_async_queue, ret));
1710 pfm_fasync(int fd, struct file *filp, int on)
1715 if (PFM_IS_FILE(filp) == 0) {
1732 ret = pfm_do_fasync(fd, filp, ctx, on);
1735 DPRINT(("pfm_fasync called on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
1738 ctx->ctx_async_queue, ret));
1750 pfm_syswide_force_stop(void *info)
1752 pfm_context_t *ctx = (pfm_context_t *)info;
1755 unsigned long flags;
1759 printk(KERN_ERR "perfmon: pfm_syswide_force_stop for CPU%d but on CPU%d\n",
1764 owner = GET_PMU_OWNER();
1765 if (owner != ctx->ctx_task) {
1766 printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected owner [%d] instead of [%d]\n",
1768 task_pid_nr(owner), task_pid_nr(ctx->ctx_task));
1771 if (GET_PMU_CTX() != ctx) {
1772 printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected ctx %p instead of %p\n",
1774 GET_PMU_CTX(), ctx);
1778 DPRINT(("on CPU%d forcing system wide stop for [%d]\n", smp_processor_id(), task_pid_nr(ctx->ctx_task)));
1786 ret = pfm_context_unload(ctx, NULL, 0, regs);
1788 DPRINT(("context_unload returned %d\n", ret));
1798 pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx)
1802 DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu));
1804 DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret));
1818 unsigned long flags;
1819 unsigned long smpl_buf_size = 0UL;
1820 void *smpl_buf_vaddr = NULL;
1821 int state, is_system;
1823 if (PFM_IS_FILE(filp) == 0) {
1824 DPRINT(("bad magic for\n"));
1847 PROTECT_CTX(ctx, flags);
1849 state = ctx->ctx_state;
1850 is_system = ctx->ctx_fl_system;
1852 task = PFM_CTX_TASK(ctx);
1855 DPRINT(("ctx_state=%d is_current=%d\n",
1877 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
1883 pfm_syswide_cleanup_other_cpu(ctx);
1897 DPRINT(("forcing unload\n"));
1902 pfm_context_unload(ctx, NULL, 0, regs);
1904 DPRINT(("ctx_state=%d\n", ctx->ctx_state));
1919 if (ctx->ctx_smpl_vaddr && current->mm) {
1920 smpl_buf_vaddr = ctx->ctx_smpl_vaddr;
1921 smpl_buf_size = ctx->ctx_smpl_size;
1924 UNPROTECT_CTX(ctx, flags);
1932 if (smpl_buf_vaddr) pfm_remove_smpl_mapping(smpl_buf_vaddr, smpl_buf_size);
1952 pfm_close(struct inode *inode, struct file *filp)
1958 unsigned long flags;
1959 unsigned long smpl_buf_size = 0UL;
1960 void *smpl_buf_addr = NULL;
1961 int free_possible = 1;
1962 int state, is_system;
1966 if (PFM_IS_FILE(filp) == 0) {
1977 PROTECT_CTX(ctx, flags);
1979 state = ctx->ctx_state;
1980 is_system = ctx->ctx_fl_system;
1982 task = PFM_CTX_TASK(ctx);
1985 DPRINT(("ctx_state=%d is_current=%d\n",
1992 if (state == PFM_CTX_UNLOADED) goto doit;
2006 if (state == PFM_CTX_MASKED && CTX_OVFL_NOBLOCK(ctx) == 0) {
2022 ctx->ctx_fl_going_zombie = 1;
2029 DPRINT(("waking up ctx_state=%d\n", state));
2042 UNPROTECT_CTX(ctx, flags);
2052 PROTECT_CTX(ctx, flags);
2061 DPRINT(("after zombie wakeup ctx_state=%d for\n", state));
2068 ctx->ctx_state = PFM_CTX_ZOMBIE;
2070 DPRINT(("zombie ctx for [%d]\n", task_pid_nr(task)));
2077 pfm_context_unload(ctx, NULL, 0, regs);
2083 state = ctx->ctx_state;
2099 if (ctx->ctx_smpl_hdr) {
2100 smpl_buf_addr = ctx->ctx_smpl_hdr;
2101 smpl_buf_size = ctx->ctx_smpl_size;
2103 ctx->ctx_smpl_hdr = NULL;
2104 ctx->ctx_fl_is_sampling = 0;
2107 DPRINT(("ctx_state=%d free_possible=%d addr=%p size=%lu\n",
2113 if (smpl_buf_addr) pfm_exit_smpl_buffer(ctx->ctx_buf_fmt);
2118 if (state == PFM_CTX_ZOMBIE) {
2119 pfm_unreserve_session(ctx, ctx->ctx_fl_system , ctx->ctx_cpu);
2135 UNPROTECT_CTX(ctx, flags);
2141 if (smpl_buf_addr) pfm_rvfree(smpl_buf_addr, smpl_buf_size);
2146 if (free_possible) pfm_context_free(ctx);
2152 pfm_no_open(struct inode *irrelevant, struct file *dontcare)
2154 DPRINT(("pfm_no_open called\n"));
2165 .unlocked_ioctl = pfm_ioctl,
2166 .open = pfm_no_open,
2167 .fasync = pfm_fasync,
2168 .release = pfm_close,
2186 .d_dname = pfmfs_dname,
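/* allocate the pseudo-file (on pfmfs) through which user space controls the context */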
2190 static struct file *
2191 pfm_alloc_file(pfm_context_t *ctx)
2194 struct inode *inode;
2205 DPRINT(("new inode ino=%ld @%p\n", inode->i_ino, inode));
2236 pfm_remap_buffer(struct vm_area_struct *vma, unsigned long buf, unsigned long addr, unsigned long size)
2258 pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned long rsize, void **user_vaddr)
2271 DPRINT(("sampling buffer rsize=%lu size=%lu bytes\n", rsize, size));
2289 smpl_buf = pfm_rvmalloc(size);
2290 if (smpl_buf == NULL) {
2291 DPRINT(("Can't allocate sampling buffer\n"));
2295 DPRINT(("smpl_buf @%p\n", smpl_buf));
2300 DPRINT(("Cannot allocate vma\n"));
2309 vma->vm_file = get_file(filp);
2310 vma->vm_flags = VM_READ|VM_MAYREAD|VM_DONTEXPAND|VM_DONTDUMP;
2318 ctx->ctx_smpl_hdr = smpl_buf;
2319 ctx->ctx_smpl_size = size;
2332 DPRINT(("Cannot find unmapped area for size %ld\n", size));
2339 DPRINT(("aligned size=%ld, hdr=%p mapped @0x%lx\n", size, ctx->ctx_smpl_hdr, vma->vm_start));
2342 if (pfm_remap_buffer(vma, (unsigned long)smpl_buf, vma->vm_start, size)) {
2343 DPRINT(("Can't remap buffer\n"));
2361 ctx->ctx_smpl_vaddr = (void *)vma->vm_start;
2362 *(unsigned long *)user_vaddr = vma->vm_start;
2369 pfm_rvfree(smpl_buf, size);
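/* credential check used before attaching to another task */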
2380 const struct cred *tcred;
2389 DPRINT(("cur: uid=%d gid=%d task: euid=%d suid=%d uid=%d egid=%d sgid=%d\n",
2398 ret = ((!uid_eq(uid, tcred->euid))
2399     || (!uid_eq(uid, tcred->suid))
2400     || (!uid_eq(uid, tcred->uid))
2401     || (!gid_eq(gid, tcred->egid))
2402     || (!gid_eq(gid, tcred->sgid))
2424 DPRINT(("cannot use blocking mode when in system wide monitoring\n"));
2435 pfm_setup_buffer_fmt(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned int ctx_flags,
2439 unsigned long size = 0UL;
2441 void *fmt_arg = NULL;
2443 #define PFM_CTXARG_BUF_ARG(a) (pfm_buffer_fmt_t *)(a+1)
2448 DPRINT(("[%d] cannot find buffer format\n", task_pid_nr(task)));
2455 if (fmt->fmt_arg_size) fmt_arg = PFM_CTXARG_BUF_ARG(arg);
2457 ret = pfm_buf_fmt_validate(fmt, task, ctx_flags, cpu, fmt_arg);
2459 DPRINT(("[%d] after validate(0x%x,%d,%p)=%d\n", task_pid_nr(task), ctx_flags, cpu, fmt_arg, ret));
2461 if (ret) goto error;
2464 ctx->ctx_buf_fmt = fmt;
2465 ctx->ctx_fl_is_sampling = 1;
2470 ret = pfm_buf_fmt_getsize(fmt, task, ctx_flags, cpu, fmt_arg, &size);
2471 if (ret) goto error;
2477 ret = pfm_smpl_buffer_alloc(current, filp, ctx, size, &uaddr);
2478 if (ret) goto error;
2483 ret = pfm_buf_fmt_init(fmt, task, ctx->ctx_smpl_hdr, ctx_flags, cpu, fmt_arg);
2490 pfm_reset_pmu_state(pfm_context_t *ctx)
2497 for (i=1; PMC_IS_LAST(i) == 0; i++) {
2498 if (PMC_IS_IMPL(i) == 0) continue;
2499 ctx->ctx_pmcs[i] = PMC_DFL_VAL(i);
2500 DPRINT(("pmc[%d]=0x%lx\n", i, ctx->ctx_pmcs[i]));
2529 ctx->ctx_all_pmcs[0] = pmu_conf->impl_pmcs[0] & ~0x1;
2534 ctx->ctx_all_pmds[0] = pmu_conf->impl_pmds[0];
2536 DPRINT(("<%d> all_pmcs=0x%lx all_pmds=0x%lx\n", ctx->ctx_fd, ctx->ctx_all_pmcs[0],ctx->ctx_all_pmds[0]));
2541 ctx->ctx_used_ibrs[0] = 0UL;
2542 ctx->ctx_used_dbrs[0] = 0UL;
2546 pfm_ctx_getsize(void *arg, size_t *sz)
2557 DPRINT(("cannot find buffer format\n"));
2562 DPRINT(("arg_size=%lu\n", *sz));
2576 pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
2582 DPRINT(("task [%d] has not memory context (kernel thread)\n", task_pid_nr(task)));
2585 if (pfm_bad_permissions(task)) {
2586 DPRINT(("no permission to attach to [%d]\n", task_pid_nr(task)));
2592 if (CTX_OVFL_NOBLOCK(ctx) == 0 && task == current) {
2593 DPRINT(("cannot load a blocking context on self for [%d]\n", task_pid_nr(task)));
2598 DPRINT(("cannot attach to zombie task [%d]\n", task_pid_nr(task)));
2605 if (task == current) return 0;
2608 DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task_pid_nr(task), task->state));
2614 wait_task_inactive(task, 0);
2628 if (pid < 2) return -EPERM;
2630 if (pid != task_pid_vnr(current)) {
2644 ret = pfm_task_incompatible(ctx, p);
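/* create a new monitoring context, its control file and, if requested, its sampling buffer */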
2656 pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
2666 ret = pfarg_is_sane(current, req);
2678 ctx = pfm_context_alloc(ctx_flags);
2682 filp = pfm_alloc_file(ctx);
2684 ret = PTR_ERR(filp);
2694 ret = pfm_setup_buffer_fmt(current, filp, ctx, ctx_flags, 0, req);
2699 DPRINT(("ctx=%p flags=0x%x system=%d notify_block=%d excl_idle=%d no_msg=%d ctx_fd=%d\n",
2704 ctx->ctx_fl_excl_idle,
2711 pfm_reset_pmu_state(ctx);
2722 if (ctx->ctx_buf_fmt) {
2723 pfm_buf_fmt_exit(ctx->ctx_buf_fmt, current, NULL, regs);
2726 pfm_context_free(ctx);
2733 static inline unsigned long
2734 pfm_new_counter_value (pfm_counter_t *reg, int is_long_reset)
2736 unsigned long val = is_long_reset ? reg->long_reset : reg->short_reset;
2737 unsigned long new_seed, old_seed = reg->seed, mask = reg->mask;
2738 extern unsigned long carta_random32 (unsigned long seed);
2741 new_seed = carta_random32(old_seed);
2742 val -= (old_seed & mask);
2743 if ((mask >> 32) != 0)
2745 new_seed |= carta_random32(old_seed >> 32) << 32;
2746 reg->seed = new_seed;
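/* reset overflowed counters (and their dependent PMDs) to their long/short reset values */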
2753 pfm_reset_regs_masked(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
2755 unsigned long mask = ovfl_regs[0];
2756 unsigned long reset_others = 0UL;
2766 if ((mask & 0x1UL) == 0UL) continue;
2768 ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset);
2769 reset_others |= ctx->ctx_pmds[i].reset_pmds[0];
2771 DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));
2777 for(i = 0; reset_others; i++, reset_others >>= 1) {
2779 if ((reset_others & 0x1) == 0) continue;
2781 ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
2784 is_long_reset ? "long" : "short", i, val));
2789 pfm_reset_regs(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
2791 unsigned long mask = ovfl_regs[0];
2792 unsigned long reset_others = 0UL;
2796 DPRINT_ovfl(("ovfl_regs=0x%lx is_long_reset=%d\n", ovfl_regs[0], is_long_reset));
2798 if (ctx->ctx_state == PFM_CTX_MASKED) {
2799 pfm_reset_regs_masked(ctx, ovfl_regs, is_long_reset);
2809 if ((mask & 0x1UL) == 0UL) continue;
2811 val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset);
2812 reset_others |= ctx->ctx_pmds[i].reset_pmds[0];
2814 DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));
2816 pfm_write_soft_counter(ctx, i, val);
2822 for(i = 0; reset_others; i++, reset_others >>= 1) {
2824 if ((reset_others & 0x1) == 0) continue;
2826 val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
2828 if (PMD_IS_COUNTING(i)) {
2829 pfm_write_soft_counter(ctx, i, val);
2834 is_long_reset ? "long" : "short", i, val));
2840 pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
2844 unsigned long value, pmc_pm;
2845 unsigned long smpl_pmds, reset_pmds, impl_pmds;
2847 int i, can_access_pmu = 0, is_loaded, is_system, expert_mode;
2848 int is_monitor, is_counting, state;
2850 pfm_reg_check_t wr_func;
2851 #define PFM_CHECK_PMC_PM(x, y, z) ((x)->ctx_fl_system ^ PMC_PM(y, z))
2853 state = ctx->ctx_state;
2854 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
2855 is_system = ctx->ctx_fl_system;
2856 task = ctx->ctx_task;
2857 impl_pmds = pmu_conf->impl_pmds[0];
2859 if (state == PFM_CTX_ZOMBIE) return -EINVAL;
2868 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
2871 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
2875 for (i = 0; i < count; i++, req++) {
2886 DPRINT(("pmc%u is invalid\n", cnum));
2890 pmc_type = pmu_conf->pmc_desc[cnum].type;
2891 pmc_pm = (value >> pmu_conf->pmc_desc[cnum].pm_pos) & 0x1;
2892 is_counting = (pmc_type & PFM_REG_COUNTING) == PFM_REG_COUNTING ? 1 : 0;
2893 is_monitor = (pmc_type & PFM_REG_MONITOR) == PFM_REG_MONITOR ? 1 : 0;
2900 if ((pmc_type & PFM_REG_IMPL) == 0 || (pmc_type & PFM_REG_CONTROL) == PFM_REG_CONTROL) {
2901 DPRINT(("pmc%u is unimplemented or no-access pmc_type=%x\n", cnum, pmc_type));
2904 wr_func = pmu_conf->pmc_desc[cnum].write_check;
2910 if (is_monitor && value != PMC_DFL_VAL(cnum) && is_system ^ pmc_pm) {
2911 DPRINT(("pmc%u pmc_pm=%lu is_system=%d\n",
2923 value |= 1 << PMU_PMC_OI;
2932 if ((smpl_pmds & impl_pmds) != smpl_pmds) {
2933 DPRINT(("invalid smpl_pmds 0x%lx for pmc%u\n", smpl_pmds, cnum));
2938 if ((reset_pmds & impl_pmds) != reset_pmds) {
2939 DPRINT(("invalid reset_pmds 0x%lx for pmc%u\n", reset_pmds, cnum));
2943 if (reg_flags & (PFM_REGFL_OVFL_NOTIFY|PFM_REGFL_RANDOM)) {
2944 DPRINT(("cannot set ovfl_notify or random on pmc%u\n", cnum));
2953 if (likely(expert_mode == 0 && wr_func)) {
2955 if (ret) goto error;
2975 ctx->ctx_pmds[cnum].flags = flags;
2977 ctx->ctx_pmds[cnum].reset_pmds[0] = reset_pmds;
2978 ctx->ctx_pmds[cnum].smpl_pmds[0] = smpl_pmds;
2992 CTX_USED_PMD(ctx, reset_pmds);
2993 CTX_USED_PMD(ctx, smpl_pmds);
2998 if (state == PFM_CTX_MASKED) ctx->ctx_ovfl_regs[0] &= ~1UL << cnum;
3005 CTX_USED_PMD(ctx, pmu_conf->pmc_desc[cnum].dep_pmd[0]);
3019 if (is_monitor) CTX_USED_MONITOR(ctx, 1UL << cnum);
3024 ctx->ctx_pmcs[cnum] = value;
3030 if (is_system == 0) ctx->th_pmcs[cnum] = value;
3035 if (can_access_pmu) {
3047 ctx->ctx_reload_pmcs[0] |= 1UL << cnum;
3052 DPRINT(("pmc[%u]=0x%lx ld=%d apmu=%d flags=0x%x all_pmcs=0x%lx used_pmds=0x%lx eventid=%ld smpl_pmds=0x%lx reset_pmds=0x%lx reloads_pmcs=0x%lx used_monitors=0x%lx ovfl_regs=0x%lx\n",
3058 ctx->ctx_all_pmcs[0],
3059 ctx->ctx_used_pmds[0],
3060 ctx->ctx_pmds[cnum].eventid,
3063 ctx->ctx_reload_pmcs[0],
3064 ctx->ctx_used_monitors[0],
3065 ctx->ctx_ovfl_regs[0]));
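/*
 * pfm_write_pmds: only the low ovfl_val bits of a counting PMD live in
 * hardware; the upper bits are kept in the 64-bit software value in
 * ctx_pmds[].val.
 */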
3080 pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3084 unsigned long value, hw_value, ovfl_mask;
3086 int i, can_access_pmu = 0, state;
3087 int is_counting, is_loaded, is_system, expert_mode;
3089 pfm_reg_check_t wr_func;
3092 state = ctx->ctx_state;
3093 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
3094 is_system = ctx->ctx_fl_system;
3095 ovfl_mask = pmu_conf->ovfl_val;
3096 task = ctx->ctx_task;
3111 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3114 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
3118 for (i = 0; i < count; i++, req++) {
3123 if (!PMD_IS_IMPL(cnum)) {
3124 DPRINT(("pmd[%u] is unimplemented or invalid\n", cnum));
3127 is_counting = PMD_IS_COUNTING(cnum);
3128 wr_func = pmu_conf->pmd_desc[cnum].write_check;
3133 if (unlikely(expert_mode == 0 && wr_func)) {
3137 if (ret) goto abort_mission;
3160 ctx->ctx_pmds[cnum].lval = value;
3166 hw_value = value & ovfl_mask;
3167 value = value & ~ovfl_mask;
3185 ctx->ctx_pmds[cnum].val = value;
3193 CTX_USED_PMD(ctx, PMD_PMD_DEP(cnum));
3198 CTX_USED_PMD(ctx, RDEP(cnum));
3204 if (is_counting && state == PFM_CTX_MASKED) {
3205 ctx->ctx_ovfl_regs[0] &= ~1UL << cnum;
3212 if (is_system == 0) ctx->th_pmds[cnum] = hw_value;
3217 if (can_access_pmu) {
3226 ctx->ctx_reload_pmds[0] |= 1UL << cnum;
3231 DPRINT(("pmd[%u]=0x%lx ld=%d apmu=%d, hw_value=0x%lx ctx_pmd=0x%lx short_reset=0x%lx "
3232 "long_reset=0x%lx notify=%c seed=0x%lx mask=0x%lx used_pmds=0x%lx reset_pmds=0x%lx reload_pmds=0x%lx all_pmds=0x%lx ovfl_regs=0x%lx\n",
3238 ctx->ctx_pmds[cnum].val,
3239 ctx->ctx_pmds[cnum].short_reset,
3240 ctx->ctx_pmds[cnum].long_reset,
3241 PMC_OVFL_NOTIFY(ctx, cnum) ? 'Y':'N',
3242 ctx->ctx_pmds[cnum].seed,
3243 ctx->ctx_pmds[cnum].mask,
3244 ctx->ctx_used_pmds[0],
3245 ctx->ctx_pmds[cnum].reset_pmds[0],
3246 ctx->ctx_reload_pmds[0],
3247 ctx->ctx_all_pmds[0],
3248 ctx->ctx_ovfl_regs[0]));
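/*
 * pfm_read_pmds: return the 64-bit software view of each requested PMD,
 * combined with the current hardware bits when the PMU is accessible.
 */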
3276 pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3279 unsigned long val = 0UL, lval, ovfl_mask, sval;
3281 unsigned int cnum, reg_flags = 0;
3282 int i, can_access_pmu = 0, state;
3283 int is_loaded, is_system, is_counting, expert_mode;
3285 pfm_reg_check_t rd_func;
3292 state = ctx->ctx_state;
3293 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
3294 is_system = ctx->ctx_fl_system;
3295 ovfl_mask = pmu_conf->ovfl_val;
3296 task = ctx->ctx_task;
3298 if (state == PFM_CTX_ZOMBIE) return -EINVAL;
3307 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3313 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
3319 DPRINT(("ld=%d apmu=%d ctx_state=%d\n",
3329 for (i = 0; i < count; i++, req++) {
3345 sval = ctx->ctx_pmds[cnum].val;
3346 lval = ctx->ctx_pmds[cnum].lval;
3347 is_counting = PMD_IS_COUNTING(cnum);
3354 if (can_access_pmu){
3362 val = is_loaded ? ctx->th_pmds[cnum] : 0UL;
3364 rd_func = pmu_conf->pmd_desc[cnum].read_check;
3377 if (unlikely(expert_mode == 0 && rd_func)) {
3378 unsigned long v = val;
3379 ret = (*rd_func)(ctx->ctx_task, ctx, cnum, &v, regs);
3380 if (ret) goto error;
3385 PFM_REG_RETFLAG_SET(reg_flags, 0);
3387 DPRINT(("pmd[%u]=0x%lx\n", cnum, val));
3413 ctx = GET_PMU_CTX();
3421 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3423 return pfm_write_pmcs(ctx, req, nreq, regs);
3434 ctx = GET_PMU_CTX();
3442 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3444 return pfm_read_pmds(ctx, req, nreq, regs);
3455 pfm_context_t *ctx = task->thread.pfm_context;
3456 unsigned long flags;
3459 if (pmu_conf->use_rr_dbregs == 0) return 0;
3461 DPRINT(("called for [%d]\n", task_pid_nr(task)));
3476 if (ctx && ctx->ctx_fl_using_dbreg == 1) return -1;
3484 if (pfm_sessions.pfs_sys_use_dbregs> 0)
3487 pfm_sessions.pfs_ptrace_use_dbregs++;
3489 DPRINT(("ptrace_use_dbregs=%u sys_use_dbregs=%u by [%d] ret = %d\n",
3490 pfm_sessions.pfs_ptrace_use_dbregs,
3491 pfm_sessions.pfs_sys_use_dbregs,
3492 task_pid_nr(task), ret));
3510 unsigned long flags;
3513 if (pmu_conf->use_rr_dbregs == 0) return 0;
3516 if (pfm_sessions.pfs_ptrace_use_dbregs == 0) {
3517 printk(KERN_ERR "perfmon: invalid release for [%d] ptrace_use_dbregs=0\n", task_pid_nr(task));
3520 pfm_sessions.pfs_ptrace_use_dbregs--;
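/* pfm_restart: resume counting (and possibly unblock the monitored task) after an overflow has been handled */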
3529 pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3534 int state, is_system;
3537 state = ctx->ctx_state;
3538 fmt = ctx->ctx_buf_fmt;
3539 is_system = ctx->ctx_fl_system;
3540 task = PFM_CTX_TASK(ctx);
3543 case PFM_CTX_MASKED:
3545 case PFM_CTX_LOADED:
3548 case PFM_CTX_UNLOADED:
3549 case PFM_CTX_ZOMBIE:
3550 DPRINT(("invalid state=%d\n", state));
3553 DPRINT(("state=%d, cannot operate (no active_restart handler)\n", state));
3563 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3573 if (task == current || is_system) {
3575 fmt = ctx->ctx_buf_fmt;
3577 DPRINT(("restarting self %d ovfl=0x%lx\n",
3579 ctx->ctx_ovfl_regs[0]));
3581 if (CTX_HAS_SMPL(ctx)) {
3585 rst_ctrl.bits.mask_monitoring = 0;
3586 rst_ctrl.bits.reset_ovfl_pmds = 0;
3588 if (state == PFM_CTX_LOADED)
3589 ret = pfm_buf_fmt_restart_active(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
3591 ret = pfm_buf_fmt_restart(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
3593 rst_ctrl.bits.mask_monitoring = 0;
3594 rst_ctrl.bits.reset_ovfl_pmds = 1;
3598 if (rst_ctrl.bits.reset_ovfl_pmds)
3601 if (rst_ctrl.bits.mask_monitoring == 0) {
3602 DPRINT(("resuming monitoring for [%d]\n", task_pid_nr(task)));
3604 if (state == PFM_CTX_MASKED) pfm_restore_monitoring(task);
3606 DPRINT(("keeping monitoring stopped for [%d]\n", task_pid_nr(task)));
3614 ctx->ctx_ovfl_regs[0] = 0UL;
3619 ctx->ctx_state = PFM_CTX_LOADED;
3624 ctx->ctx_fl_can_restart = 0;
3637 if (state == PFM_CTX_MASKED) {
3638 if (ctx->ctx_fl_can_restart == 0) return -EINVAL;
3643 ctx->ctx_fl_can_restart = 0;
3662 if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) {
3663 DPRINT(("unblocking [%d]\n", task_pid_nr(task)));
3666 DPRINT(("[%d] armed exit trap\n", task_pid_nr(task)));
3668 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_RESET;
3670 PFM_SET_WORK_PENDING(task, 1);
3672 set_notify_resume(task);
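/* pfm_debug: toggle the global debug flag; clearing it also resets the per-CPU statistics */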
3682 pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3684 unsigned int m = *(unsigned int *)arg;
3686 pfm_sysctl.debug = m == 0 ? 0 : 1;
3691 memset(pfm_stats, 0, sizeof(pfm_stats));
3692 for(m=0; m < NR_CPUS; m++) pfm_stats[m].pfm_ovfl_intr_cycles_min = ~0UL;
3701 pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3706 unsigned long flags;
3711 int i, can_access_pmu = 0;
3712 int is_system, is_loaded;
3714 if (pmu_conf->use_rr_dbregs == 0) return -EINVAL;
3716 state = ctx->ctx_state;
3717 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
3718 is_system = ctx->ctx_fl_system;
3719 task = ctx->ctx_task;
3721 if (state == PFM_CTX_ZOMBIE) return -EINVAL;
3735 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3738 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
3748 first_time = ctx->ctx_fl_using_dbreg == 0;
3754 DPRINT(("debug registers already in use for [%d]\n", task_pid_nr(task)));
3768 if (first_time && is_system) {
3769 if (pfm_sessions.pfs_ptrace_use_dbregs)
3772 pfm_sessions.pfs_sys_use_dbregs++;
3777 if (ret != 0) return ret;
3783 ctx->ctx_fl_using_dbreg = 1;
3794 if (first_time && can_access_pmu) {
3795 DPRINT(("[%d] clearing ibrs, dbrs\n", task_pid_nr(task)));
3796 for (i=0; i < pmu_conf->num_ibrs; i++) {
3801 for (i=0; i < pmu_conf->num_dbrs; i++) {
3802 ia64_set_dbr(i, 0UL);
3811 for (i = 0; i < count; i++, req++) {
3818 if ((mode == PFM_CODE_RR && rnum >= PFM_NUM_IBRS) || ((mode == PFM_DATA_RR) && rnum >= PFM_NUM_DBRS)) {
3819 DPRINT(("invalid register %u val=0x%lx mode=%d i=%d count=%d\n",
3820 rnum, dbreg.val, mode, i, count));
3829 if (mode == PFM_CODE_RR)
3830 dbreg.ibr.ibr_x = 0;
3832 dbreg.dbr.dbr_r = dbreg.dbr.dbr_w = 0;
3847 if (mode == PFM_CODE_RR) {
3848 CTX_USED_IBR(ctx, rnum);
3850 if (can_access_pmu) {
3855 ctx->ctx_ibrs[rnum] = dbreg.val;
3857 DPRINT(("write ibr%u=0x%lx used_ibrs=0x%x ld=%d apmu=%d\n",
3858 rnum, dbreg.val, ctx->ctx_used_ibrs[0], is_loaded, can_access_pmu));
3860 CTX_USED_DBR(ctx, rnum);
3862 if (can_access_pmu) {
3863 ia64_set_dbr(rnum, dbreg.val);
3866 ctx->ctx_dbrs[rnum] = dbreg.val;
3868 DPRINT(("write dbr%u=0x%lx used_dbrs=0x%x ld=%d apmu=%d\n",
3869 rnum, dbreg.val, ctx->ctx_used_dbrs[0], is_loaded, can_access_pmu));
3881 if (ctx->ctx_fl_system) {
3882 pfm_sessions.pfs_sys_use_dbregs--;
3885 ctx->ctx_fl_using_dbreg = 0;
3896 pfm_write_ibrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3898 return pfm_write_ibr_dbr(PFM_CODE_RR, ctx, arg, count, regs);
3902 pfm_write_dbrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3904 return pfm_write_ibr_dbr(PFM_DATA_RR, ctx, arg, count, regs);
3914 ctx = GET_PMU_CTX();
3922 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3924 return pfm_write_ibrs(ctx, req, nreq, regs);
3935 ctx = GET_PMU_CTX();
3943 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3945 return pfm_write_dbrs(ctx, req, nreq, regs);
3951 pfm_get_features(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3960 pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3964 int state, is_system;
3966 state = ctx->ctx_state;
3967 is_system = ctx->ctx_fl_system;
3972 if (state == PFM_CTX_UNLOADED) return -EINVAL;
3980 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3983 DPRINT(("task [%d] ctx_state=%d is_system=%d\n",
3984 task_pid_nr(PFM_CTX_TASK(ctx)),
4041 ctx->ctx_saved_psr_up = 0;
4042 DPRINT(("task=[%d]\n", task_pid_nr(task)));
4049 pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4052 int state, is_system;
4054 state = ctx->ctx_state;
4055 is_system = ctx->ctx_fl_system;
4057 if (state != PFM_CTX_LOADED) return -EINVAL;
4065 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
4102 if (ctx->ctx_task == current) {
4130 pfm_get_pmc_reset(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4137 for (i = 0; i < count; i++, req++) {
4141 if (!PMC_IS_IMPL(cnum)) goto abort_mission;
4157 pfm_check_task_exist(pfm_context_t *ctx)
4165 if (t->thread.pfm_context == ctx) {
4173 DPRINT(("pfm_check_task_exist: ret=%d ctx=%p\n", ret, ctx));
4179 pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4183 struct pfm_context_t *old;
4184 unsigned long flags;
4189 unsigned long *pmcs_source, *pmds_source;
4192 int state, is_system, set_dbregs = 0;
4194 state = ctx->ctx_state;
4195 is_system = ctx->ctx_fl_system;
4199 if (state != PFM_CTX_UNLOADED) {
4200 DPRINT(("cannot load to [%d], invalid ctx_state=%d\n",
4206 DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg));
4209 DPRINT(("cannot use blocking mode on self\n"));
4213 ret = pfm_get_task(ctx, req->load_pid, &task);
4224 if (is_system && task != current) {
4225 DPRINT(("system wide is self monitoring only load_pid=%d\n",
4237 if (ctx->ctx_fl_using_dbreg) {
4240 DPRINT(("load_pid [%d] task is debugged, cannot load range restrictions\n", req->load_pid));
4246 if (pfm_sessions.pfs_ptrace_use_dbregs) {
4247 DPRINT(("cannot load [%d] dbregs in use\n",
4248 task_pid_nr(task)));
4251 pfm_sessions.pfs_sys_use_dbregs++;
4252 DPRINT(("load [%d] increased sys_use_dbreg=%u\n", task_pid_nr(task), pfm_sessions.pfs_sys_use_dbregs));
4259 if (ret) goto error;
4283 ret = pfm_reserve_session(current, is_system, the_cpu);
4284 if (ret) goto error;
4295 DPRINT(("before cmpxchg() old_ctx=%p new_ctx=%p\n",
4296 thread->pfm_context, ctx));
4299 old = ia64_cmpxchg(acq, &thread->pfm_context, NULL, ctx, sizeof(pfm_context_t *));
4301 DPRINT(("load_pid [%d] already has a context\n", req->load_pid));
4305 pfm_reset_msgq(ctx);
4307 ctx->ctx_state = PFM_CTX_LOADED;
4312 ctx->ctx_task = task;
4329 pfm_copy_pmds(task, ctx);
4330 pfm_copy_pmcs(task, ctx);
4332 pmcs_source = ctx->th_pmcs;
4333 pmds_source = ctx->th_pmds;
4340 if (is_system == 0) {
4344 DPRINT(("clearing psr.sp for [%d]\n", task_pid_nr(task)));
4348 SET_ACTIVATION(ctx);
4353 owner_task = GET_PMU_OWNER();
4354 if (owner_task) pfm_lazy_save_regs(owner_task);
4361 pfm_restore_pmds(pmds_source, ctx->ctx_all_pmds[0]);
4362 pfm_restore_pmcs(pmcs_source, ctx->ctx_all_pmcs[0]);
4364 ctx->ctx_reload_pmcs[0] = 0UL;
4365 ctx->ctx_reload_pmds[0] = 0UL;
4370 if (ctx->ctx_fl_using_dbreg) {
4371 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
4372 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
4377 SET_PMU_OWNER(task, ctx);
4379 DPRINT(("context loaded on PMU for [%d]\n", task_pid_nr(task)));
4387 ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
4388 SET_LAST_CPU(ctx, -1);
4391 ctx->ctx_saved_psr_up = 0UL;
4398 if (ret) pfm_unreserve_session(ctx, ctx->ctx_fl_system, the_cpu);
4403 if (ret && set_dbregs) {
4405 pfm_sessions.pfs_sys_use_dbregs--;
4411 if (is_system == 0 && task != current) {
4415 ret = pfm_check_task_exist(ctx);
4417 ctx->ctx_state = PFM_CTX_UNLOADED;
4418 ctx->ctx_task = NULL;
4433 static void pfm_flush_pmds(struct task_struct *, pfm_context_t *ctx);
4436 pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4443 DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task_pid_nr(task) : -1));
4445 prev_state = ctx->ctx_state;
4446 is_system = ctx->ctx_fl_system;
4451 if (prev_state == PFM_CTX_UNLOADED) {
4452 DPRINT(("ctx_state=%d, nothing to do\n", prev_state));
4459 ret = pfm_stop(ctx, NULL, 0, regs);
4460 if (ret) return ret;
4462 ctx->ctx_state = PFM_CTX_UNLOADED;
4489 if (prev_state != PFM_CTX_ZOMBIE)
4490 pfm_unreserve_session(ctx, 1 , ctx->ctx_cpu);
4499 ctx->ctx_task = NULL;
4518 DPRINT(("setting psr.sp for [%d]\n", task_pid_nr(task)));
4524 pfm_flush_pmds(task, ctx);
4532 if (prev_state != PFM_CTX_ZOMBIE)
4533 pfm_unreserve_session(ctx, 0 , ctx->ctx_cpu);
4538 ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
4539 SET_LAST_CPU(ctx, -1);
4550 ctx->ctx_task = NULL;
4552 PFM_SET_WORK_PENDING(task, 0);
4554 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
4555 ctx->ctx_fl_can_restart = 0;
4556 ctx->ctx_fl_going_zombie = 0;
4558 DPRINT(("disconnected [%d] from context\n", task_pid_nr(task)));
4572 unsigned long flags;
4577 ctx = PFM_GET_CTX(task);
4579 PROTECT_CTX(ctx, flags);
4581 DPRINT(("state=%d task [%d]\n", ctx->ctx_state, task_pid_nr(task)));
4583 state = ctx->ctx_state;
4585 case PFM_CTX_UNLOADED:
4590 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task_pid_nr(task));
4592 case PFM_CTX_LOADED:
4593 case PFM_CTX_MASKED:
4594 ret = pfm_context_unload(ctx, NULL, 0, regs);
4596 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret);
4598 DPRINT(("ctx unloaded for current state was %d\n", state));
4600 pfm_end_notify_user(ctx);
4602 case PFM_CTX_ZOMBIE:
4603 ret = pfm_context_unload(ctx, NULL, 0, regs);
4605 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret);
4610 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] unexpected state=%d\n", task_pid_nr(task), state);
4613 UNPROTECT_CTX(ctx, flags);
4615 { u64 psr = pfm_get_psr();
4626 if (free_ok) pfm_context_free(ctx);
4632 #define PFM_CMD(name, flags, arg_count, arg_type, getsz) { name, #name, flags, arg_count, sizeof(arg_type), getsz }
4633 #define PFM_CMD_S(name, flags) { name, #name, flags, 0, 0, NULL }
4634 #define PFM_CMD_PCLRWS (PFM_CMD_FD|PFM_CMD_ARG_RW|PFM_CMD_STOP)
4635 #define PFM_CMD_PCLRW (PFM_CMD_FD|PFM_CMD_ARG_RW)
4636 #define PFM_CMD_NONE { NULL, "no-cmd", 0, 0, 0, NULL}
4638 static pfm_cmd_desc_t pfm_cmd_tab[]={
4640 PFM_CMD(pfm_write_pmcs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4641 PFM_CMD(pfm_write_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4642 PFM_CMD(pfm_read_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4643 PFM_CMD_S(pfm_stop, PFM_CMD_PCLRWS),
4644 PFM_CMD_S(pfm_start, PFM_CMD_PCLRWS),
4647 PFM_CMD(pfm_context_create, PFM_CMD_ARG_RW, 1, pfarg_context_t, pfm_ctx_getsize),
4649 PFM_CMD_S(pfm_restart, PFM_CMD_PCLRW),
4652 PFM_CMD(pfm_debug, 0, 1, unsigned int, NULL),
4654 PFM_CMD(pfm_get_pmc_reset, PFM_CMD_ARG_RW, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4656 PFM_CMD_S(pfm_context_unload, PFM_CMD_PCLRWS),
4674 #define PFM_CMD_COUNT (sizeof(pfm_cmd_tab)/sizeof(pfm_cmd_desc_t))
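/* sanity checks before a command runs: commands flagged PFM_CMD_STOP require the target task to be stopped, unless it is the caller or a system-wide session */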
4677 pfm_check_task_state(pfm_context_t *ctx, int cmd, unsigned long flags)
4680 int state, old_state;
4683 state = ctx->ctx_state;
4684 task = ctx->ctx_task;
4687 DPRINT(("context %d no task, state=%d\n", ctx->ctx_fd, state));
4691 DPRINT(("context %d state=%d [%d] task_state=%ld must_stop=%d\n",
4695 task->state, PFM_CMD_STOPPED(cmd)));
4704 if (task == current || ctx->ctx_fl_system) return 0;
4710 case PFM_CTX_UNLOADED:
4715 case PFM_CTX_ZOMBIE:
4719 DPRINT(("cmd %d state zombie cannot operate on context\n", cmd));
4721 case PFM_CTX_MASKED:
4739 if (PFM_CMD_STOPPED(cmd)) {
4741 DPRINT(("[%d] task not in stopped state\n", task_pid_nr(task)));
4760 UNPROTECT_CTX(ctx, flags);
4762 wait_task_inactive(task, 0);
4764 PROTECT_CTX(ctx, flags);
4769 if (ctx->ctx_state != old_state) {
4770 DPRINT(("old_state=%d new_state=%d\n", old_state, ctx->ctx_state));
4783 struct fd f = {NULL, 0};
4784 pfm_context_t *ctx = NULL;
4785 unsigned long flags = 0UL;
4786 void *args_k = NULL;
4788 size_t base_sz, sz, xtra_sz = 0;
4789 int narg, completed_args = 0, call_made = 0, cmd_flags;
4791 int (*getsize)(void *arg, size_t *sz);
4792 #define PFM_MAX_ARGSIZE 4096
4800 DPRINT(("invalid cmd=%d\n", cmd));
4804 func = pfm_cmd_tab[cmd].cmd_func;
4805 narg = pfm_cmd_tab[cmd].cmd_narg;
4806 base_sz = pfm_cmd_tab[cmd].cmd_argsize;
4807 getsize = pfm_cmd_tab[cmd].cmd_getsize;
4808 cmd_flags = pfm_cmd_tab[cmd].cmd_flags;
4811 DPRINT(("invalid cmd=%d\n", cmd));
4815 DPRINT(("cmd=%s idx=%d narg=0x%x argsz=%lu count=%d\n",
4825 if (unlikely((narg == PFM_CMD_ARG_MANY && count <= 0) || (narg > 0 && narg != count)))
4829 sz = xtra_sz + base_sz*count;
4833 if (unlikely(sz > PFM_MAX_ARGSIZE)) {
4854 DPRINT(("cannot copy_from_user %lu bytes @%p\n", sz, arg));
4861 if (completed_args == 0 && getsize) {
4865 ret = (*getsize)(args_k, &xtra_sz);
4866 if (ret) goto error_args;
4870 DPRINT(("restart_args sz=%lu xtra_sz=%lu\n", sz, xtra_sz));
4873 if (likely(xtra_sz)) goto restart_args;
4876 if (unlikely((cmd_flags & PFM_CMD_FD) == 0)) goto skip_fd;
4882 DPRINT(("invalid fd %d\n", fd));
4886 DPRINT(("fd %d not related to perfmon\n", fd));
4890 ctx = f.file->private_data;
4892 DPRINT(("no context for fd %d\n", fd));
4897 PROTECT_CTX(ctx, flags);
4902 ret = pfm_check_task_state(ctx, cmd, flags);
4903 if (unlikely(ret)) goto abort_locked;
4912 DPRINT(("context unlocked\n"));
4913 UNPROTECT_CTX(ctx, flags);
4917 if (call_made && PFM_CMD_RW_ARG(cmd) && copy_to_user(arg, args_k, base_sz*count)) ret = -EFAULT;
4925 DPRINT(("cmd=%s ret=%ld\n", PFM_CMD_NAME(cmd), ret));
4931 pfm_resume_after_ovfl(pfm_context_t *ctx, unsigned long ovfl_regs, struct pt_regs *regs)
4938 state = ctx->ctx_state;
4943 if (CTX_HAS_SMPL(ctx)) {
4945 rst_ctrl.bits.mask_monitoring = 0;
4946 rst_ctrl.bits.reset_ovfl_pmds = 0;
4948 if (state == PFM_CTX_LOADED)
4949 ret = pfm_buf_fmt_restart_active(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
4951 ret = pfm_buf_fmt_restart(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
4953 rst_ctrl.bits.mask_monitoring = 0;
4954 rst_ctrl.bits.reset_ovfl_pmds = 1;
4958 if (rst_ctrl.bits.reset_ovfl_pmds) {
4961 if (rst_ctrl.bits.mask_monitoring == 0) {
4962 DPRINT(("resuming monitoring\n"));
4963 if (ctx->ctx_state == PFM_CTX_MASKED) pfm_restore_monitoring(current);
4965 DPRINT(("stopping monitoring\n"));
4968 ctx->ctx_state = PFM_CTX_LOADED;
4977 pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs)
4983 ret = pfm_context_unload(ctx, NULL, 0, regs);
4985 printk(KERN_ERR "pfm_context_force_terminate: [%d] unloaded failed with %d\n", task_pid_nr(current), ret);
5000 static int pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds);
5017 unsigned long ovfl_regs;
5028 PROTECT_CTX(ctx, flags);
5030 PFM_SET_WORK_PENDING(current, 0);
5037 reason = ctx->ctx_fl_trap_reason;
5038 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
5039 ovfl_regs = ctx->ctx_ovfl_regs[0];
5041 DPRINT(("reason=%d state=%d\n", reason, ctx->ctx_state));
5046 if (ctx->ctx_fl_going_zombie || ctx->ctx_state == PFM_CTX_ZOMBIE)
5050 if (reason == PFM_TRAP_REASON_RESET)
5057 UNPROTECT_CTX(ctx, flags);
5064 DPRINT(("before block sleeping\n"));
5072 DPRINT(("after block sleeping ret=%d\n", ret));
5080 PROTECT_CTX(ctx, dummy_flags);
5088 ovfl_regs = ctx->ctx_ovfl_regs[0];
5090 if (ctx->ctx_fl_going_zombie) {
5092 DPRINT(("context is zombie, bailing out\n"));
5093 pfm_context_force_terminate(ctx, regs);
5103 pfm_resume_after_ovfl(ctx, ovfl_regs, regs);
5104 ctx->ctx_ovfl_regs[0] = 0UL;
5110 UNPROTECT_CTX(ctx, flags);
5114 pfm_notify_user(pfm_context_t *ctx, pfm_msg_t *msg)
5116 if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
5117 DPRINT(("ignoring overflow notification, owner is zombie\n"));
5121 DPRINT(("waking up somebody\n"));
5135 pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds)
5139 if (ctx->ctx_fl_no_msg == 0) {
5140 msg = pfm_get_new_msg(ctx);
5142 printk(KERN_ERR "perfmon: pfm_ovfl_notify_user no more notification msgs\n");
5156 DPRINT(("ovfl msg: msg=%p no_msg=%d fd=%d ovfl_pmds=0x%lx\n",
5162 return pfm_notify_user(ctx, msg);
5166 pfm_end_notify_user(pfm_context_t *ctx)
5170 msg = pfm_get_new_msg(ctx);
5172 printk(KERN_ERR "perfmon: pfm_end_notify_user no more notification msgs\n");
5176 memset(msg, 0, sizeof(*msg));
5182 DPRINT(("end msg: msg=%p no_msg=%d ctx_fd=%d\n",
5187 return pfm_notify_user(ctx, msg);
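/*
 * PMU overflow interrupt path: update the 64-bit software counters, hand the
 * overflow to the sampling format handler when one is attached, and decide
 * whether to notify user space, block the task and mask monitoring.
 */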
5194 static void pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx,
5195 unsigned long pmc0, struct pt_regs *regs)
5199 unsigned long old_val, ovfl_val, new_val;
5200 unsigned long ovfl_notify = 0UL, ovfl_pmds = 0UL, smpl_pmds = 0UL, reset_pmds;
5203 unsigned int i, has_smpl;
5204 int must_notify = 0;
5206 if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) goto stop_monitoring;
5211 if (unlikely((pmc0 & 0x1) == 0)) goto sanity_check;
5213 tstamp = ia64_get_itc();
5215 ovfl_val = pmu_conf->ovfl_val;
5216 has_smpl = CTX_HAS_SMPL(ctx);
5219 "used_pmds=0x%lx\n",
5221 task ? task_pid_nr(task): -1,
5222 (regs ? regs->cr_iip : 0),
5223 CTX_OVFL_NOBLOCK(ctx) ? "nonblocking" : "blocking",
5224 ctx->ctx_used_pmds[0]));
5231 for (i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
5234 if ((mask & 0x1) == 0) continue;
5242 old_val = new_val = ctx->ctx_pmds[i].val;
5243 new_val += 1 + ovfl_val;
5244 ctx->ctx_pmds[i].val = new_val;
5249 if (likely(old_val > new_val)) {
5250 ovfl_pmds |= 1UL << i;
5251 if (PMC_OVFL_NOTIFY(ctx, i)) ovfl_notify |= 1UL << i;
5254 DPRINT_ovfl(("ctx_pmd[%d].val=0x%lx old_val=0x%lx pmd=0x%lx ovfl_pmds=0x%lx ovfl_notify=0x%lx\n",
5266 if (ovfl_pmds == 0UL) return;
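/*
 * The loop above virtualizes each counter to 64 bits: ovfl_val is the mask of
 * bits actually implemented by the hardware counter, so one hardware wrap adds
 * 1 + ovfl_val to the software value kept in ctx_pmds[i].val. As a numeric
 * illustration only (the width depends on the PMU): with 47 implemented bits,
 * ovfl_val = 2^47 - 1 and each wrap adds 2^47; when the 64-bit software value
 * itself wraps (old_val > new_val) the PMD is marked as overflowed here.
 */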
5279 unsigned long start_cycles, end_cycles;
5280 unsigned long pmd_mask;
5285 ovfl_arg = &ctx->ctx_ovfl_arg;
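/*
 * Sampling path: when a buffer format is attached, each overflowed PMD is
 * passed to the format's fmt_handler together with the PMD values selected by
 * its smpl_pmds bitmask. A non-zero return from the handler aborts processing
 * of the remaining overflows, and the time spent in the handler is accounted
 * in pfm_smpl_handler_calls/cycles.
 */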
5289 for(i=PMU_FIRST_COUNTER; pmd_mask && ret == 0; i++, pmd_mask >>=1) {
5293 if ((pmd_mask & 0x1) == 0) continue;
5296 ovfl_arg->ovfl_notify = ovfl_notify & mask ? 1 : 0;
5299 ovfl_arg->smpl_pmds[0] = smpl_pmds = ctx->ctx_pmds[i].smpl_pmds[0];
5310 for(j=0, k=0; smpl_pmds; j++, smpl_pmds >>=1) {
5311 if ((smpl_pmds & 0x1) == 0) continue;
5317 pfm_stats[this_cpu].pfm_smpl_handler_calls++;
5319 start_cycles = ia64_get_itc();
5324 ret = (*ctx->ctx_buf_fmt->fmt_handler)(task, ctx->ctx_smpl_hdr, ovfl_arg, regs, tstamp);
5326 end_cycles = ia64_get_itc();
5340 pfm_stats[this_cpu].pfm_smpl_handler_cycles += end_cycles - start_cycles;
5345 if (ret && pmd_mask) {
5346 DPRINT(("handler aborts leftover ovfl_pmds=0x%lx\n",
5347 pmd_mask<<PMU_FIRST_COUNTER));
5352 ovfl_pmds &= ~reset_pmds;
5358 ovfl_ctrl.bits.notify_user = ovfl_notify ? 1 : 0;
5359 ovfl_ctrl.bits.block_task = ovfl_notify ? 1 : 0;
5360 ovfl_ctrl.bits.mask_monitoring = ovfl_notify ? 1 : 0;
5361 ovfl_ctrl.bits.reset_ovfl_pmds = ovfl_notify ? 0 : 1;
5365 if (ovfl_notify == 0) reset_pmds = ovfl_pmds;
5368 DPRINT_ovfl(("ovfl_pmds=0x%lx reset_pmds=0x%lx\n", ovfl_pmds, reset_pmds));
5374 unsigned long bm = reset_pmds;
5378 if (ovfl_notify && ovfl_ctrl.bits.notify_user) {
5382 ctx->ctx_ovfl_regs[0] = ovfl_pmds;
5387 if (CTX_OVFL_NOBLOCK(ctx) == 0 && ovfl_ctrl.bits.block_task) {
5389 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_BLOCK;
5394 PFM_SET_WORK_PENDING(task, 1);
5400 set_notify_resume(task);
5409 DPRINT_ovfl(("owner [%d] pending=%ld reason=%u ovfl_pmds=0x%lx ovfl_notify=0x%lx masked=%d\n",
5410 GET_PMU_OWNER() ? task_pid_nr(GET_PMU_OWNER()) : -1,
5411 PFM_GET_WORK_PENDING(task),
5412 ctx->ctx_fl_trap_reason,
5415 ovfl_ctrl.bits.mask_monitoring ? 1 : 0));
5419 if (ovfl_ctrl.bits.mask_monitoring) {
5420 pfm_mask_monitoring(task);
5421 ctx->ctx_state = PFM_CTX_MASKED;
5422 ctx->ctx_fl_can_restart = 1;
5428 if (must_notify) pfm_ovfl_notify_user(ctx, ovfl_notify);
5433 printk(KERN_ERR "perfmon: CPU%d overflow handler [%d] pmc0=0x%lx\n",
5435 task ? task_pid_nr(task) : -1,
5468 DPRINT(("ctx is zombie for [%d], converted to spurious\n", task ? task_pid_nr(task): -1));
5476 pfm_do_interrupt_handler(void *arg, struct pt_regs *regs)
5480 unsigned long flags;
5485 pfm_stats[this_cpu].pfm_ovfl_intr_count++;
5492 task = GET_PMU_OWNER();
5493 ctx = GET_PMU_CTX();
5499 if (PMC0_HAS_OVFL(pmc0) && task) {
5505 if (!ctx) goto report_spurious1;
5508 goto report_spurious2;
5510 PROTECT_CTX_NOPRINT(ctx, flags);
5512 pfm_overflow_handler(task, ctx, pmc0, regs);
5514 UNPROTECT_CTX_NOPRINT(ctx, flags);
5517 pfm_stats[this_cpu].pfm_spurious_ovfl_intr_count++;
5528 printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d has no PFM context\n",
5529 this_cpu, task_pid_nr(task));
5533 printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d, invalid flag\n",
5541 pfm_interrupt_handler(int irq, void *arg)
5543 unsigned long start_cycles, total_cycles;
5550 if (likely(!pfm_alt_intr_handler)) {
5551 min = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min;
5552 max = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max;
5554 start_cycles = ia64_get_itc();
5556 ret = pfm_do_interrupt_handler(arg, regs);
5558 total_cycles = ia64_get_itc();
5564 total_cycles -= start_cycles;
5566 if (total_cycles < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = total_cycles;
5567 if (total_cycles > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = total_cycles;
5569 pfm_stats[this_cpu].pfm_ovfl_intr_cycles += total_cycles;
5573 (*pfm_alt_intr_handler->handler)(irq, arg, regs);
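/*
 * /proc/perfmon implementation: a seq_file whose iterator positions map to
 * CPU ids, with PFM_PROC_SHOW_HEADER as a special first token. The show
 * routine prints the global header (version, sessions, registered buffer
 * formats) followed by per-CPU overflow/interrupt statistics and, where
 * available, current PSR/PMC/PMD values.
 */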
5584 #define PFM_PROC_SHOW_HEADER ((void *)(long)nr_cpu_ids+1)
5587 pfm_proc_start(struct seq_file *m, loff_t *pos)
5590 return PFM_PROC_SHOW_HEADER;
5593 while (*pos <= nr_cpu_ids) {
5595 return (void *)*pos;
5603 pfm_proc_next(struct seq_file *m, void *v, loff_t *pos)
5606 return pfm_proc_start(m, pos);
5610 pfm_proc_stop(struct seq_file *m, void *v)
5615 pfm_proc_show_header(struct seq_file *m)
5619 unsigned long flags;
5622 "perfmon version : %u.%u\n"
5625 "expert mode : %s\n"
5626 "ovfl_mask : 0x%lx\n"
5627 "PMU flags : 0x%x\n",
5638 "proc_sessions : %u\n"
5639 "sys_sessions : %u\n"
5640 "sys_use_dbregs : %u\n"
5641 "ptrace_use_dbregs : %u\n",
5642 pfm_sessions.pfs_task_sessions,
5643 pfm_sessions.pfs_sys_sessions,
5644 pfm_sessions.pfs_sys_use_dbregs,
5645 pfm_sessions.pfs_ptrace_use_dbregs);
5649 spin_lock(&pfm_buffer_fmt_lock);
5653 seq_printf(m, "format : %02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x %s\n",
5672 spin_unlock(&pfm_buffer_fmt_lock);
5677 pfm_proc_show(struct seq_file *m, void *v)
5683 if (v == PFM_PROC_SHOW_HEADER) {
5684 pfm_proc_show_header(m);
5692 "CPU%-2d overflow intrs : %lu\n"
5693 "CPU%-2d overflow cycles : %lu\n"
5694 "CPU%-2d overflow min : %lu\n"
5695 "CPU%-2d overflow max : %lu\n"
5696 "CPU%-2d smpl handler calls : %lu\n"
5697 "CPU%-2d smpl handler cycles : %lu\n"
5698 "CPU%-2d spurious intrs : %lu\n"
5699 "CPU%-2d replay intrs : %lu\n"
5700 "CPU%-2d syst_wide : %d\n"
5701 "CPU%-2d dcr_pp : %d\n"
5702 "CPU%-2d exclude idle : %d\n"
5703 "CPU%-2d owner : %d\n"
5704 "CPU%-2d context : %p\n"
5705 "CPU%-2d activations : %lu\n",
5706 cpu, pfm_stats[cpu].pfm_ovfl_intr_count,
5707 cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles,
5708 cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_min,
5709 cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_max,
5710 cpu, pfm_stats[cpu].pfm_smpl_handler_calls,
5711 cpu, pfm_stats[cpu].pfm_smpl_handler_cycles,
5712 cpu, pfm_stats[cpu].pfm_spurious_ovfl_intr_count,
5713 cpu, pfm_stats[cpu].pfm_replay_ovfl_intr_count,
5717 cpu, pfm_get_cpu_data(pmu_owner, cpu) ? pfm_get_cpu_data(pmu_owner, cpu)->pid: -1,
5718 cpu, pfm_get_cpu_data(pmu_ctx, cpu),
5719 cpu, pfm_get_cpu_data(pmu_activation_number, cpu));
5723 psr = pfm_get_psr();
5728 "CPU%-2d psr : 0x%lx\n"
5729 "CPU%-2d pmc0 : 0x%lx\n",
5733 for (i=0; PMC_IS_LAST(i) == 0; i++) {
5734 if (PMC_IS_COUNTING(i) == 0) continue;
5736 "CPU%-2d pmc%u : 0x%lx\n"
5737 "CPU%-2d pmd%u : 0x%lx\n",
5746 .start = pfm_proc_start,
5747 .next = pfm_proc_next,
5748 .stop = pfm_proc_stop,
5749 .show = pfm_proc_show
5753 pfm_proc_open(struct inode *inode, struct file *file)
5755 return seq_open(file, &pfm_seq_ops);
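/*
 * System-wide monitoring context-switch helper: propagates dcr.pp (tracked in
 * the per-CPU pfm_syst_info flags) into psr.pp for the incoming task, and
 * skips enabling it for the idle task (pid 0) when PFM_CPUINFO_EXCL_IDLE is
 * set.
 */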
5770 unsigned long dcr_pp;
5772 dcr_pp = info & PFM_CPUINFO_DCR_PP ? 1 : 0;
5778 if ((info & PFM_CPUINFO_EXCL_IDLE) == 0 || task->pid) {
5780 ia64_psr(regs)->pp = is_ctxswin ? dcr_pp : 0;
5814 pfm_force_cleanup(pfm_context_t *ctx, struct pt_regs *regs)
5821 if (GET_PMU_OWNER() == task) {
5822 DPRINT(("cleared ownership for [%d]\n",
5823 task_pid_nr(ctx->ctx_task)));
5830 PFM_SET_WORK_PENDING(task, 0);
5835 DPRINT(("force cleanup for [%d]\n", task_pid_nr(task)));
5846 unsigned long flags;
5850 ctx = PFM_GET_CTX(task);
5851 if (ctx == NULL) return;
5858 flags = pfm_protect_ctx_ctxsw(ctx);
5860 if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
5865 pfm_force_cleanup(ctx, regs);
5867 BUG_ON(ctx->ctx_smpl_hdr);
5869 pfm_unprotect_ctx_ctxsw(ctx, flags);
5871 pfm_context_free(ctx);
5879 psr = pfm_get_psr();
5909 pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]);
5921 if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();
5927 pfm_unprotect_ctx_ctxsw(ctx, flags);
5937 ctx = PFM_GET_CTX(task);
5938 if (ctx == NULL) return;
5943 psr = pfm_get_psr();
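/*
 * Lazy save path: the previous owner's PMU registers are left live across the
 * context switch and are saved here, under the context lock, only when
 * another context claims the PMU; as above, the PMU is unfrozen if pmc0
 * recorded a pending overflow.
 */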
5966 unsigned long flags;
5968 { u64 psr = pfm_get_psr();
5969 BUG_ON(psr & IA64_PSR_UP);
5969 BUG_ON(psr & IA64_PSR_UP);
5972 ctx = PFM_GET_CTX(task);
5983 PROTECT_CTX(ctx,flags);
5997 pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]);
6009 if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();
6016 UNPROTECT_CTX(ctx,flags);
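/*
 * Context-switch reload path (per-task monitoring): if the task comes back on
 * the same CPU with the same PMU activation number, only the registers
 * modified in the meantime (ctx_reload_pmcs/pmds) are rewritten; otherwise
 * the PMDs actually used (with pfm_sysctl.fastctxsw) or all PMDs, plus all
 * PMCs, are reloaded. An overflow latched in the saved pmc0 is replayed
 * (counted as a replay intr in /proc/perfmon) when the PMU supports
 * PFM_PMU_IRQ_RESEND.
 */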
6028 unsigned long pmc_mask = 0UL, pmd_mask = 0UL;
6029 unsigned long flags;
6031 int need_irq_resend;
6033 ctx = PFM_GET_CTX(task);
6048 flags = pfm_protect_ctx_ctxsw(ctx);
6049 psr = pfm_get_psr();
6051 need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;
6056 if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) {
6059 BUG_ON(ctx->ctx_smpl_hdr);
6061 pfm_force_cleanup(ctx, regs);
6063 pfm_unprotect_ctx_ctxsw(ctx, flags);
6068 pfm_context_free(ctx);
6077 if (ctx->ctx_fl_using_dbreg) {
6078 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
6079 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
6084 psr_up = ctx->ctx_saved_psr_up;
6090 if (GET_LAST_CPU(ctx) == smp_processor_id() && ctx->ctx_last_activation == GET_ACTIVATION()) {
6095 pmc_mask = ctx->ctx_reload_pmcs[0];
6096 pmd_mask = ctx->ctx_reload_pmds[0];
6105 pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];
6114 pmc_mask = ctx->ctx_all_pmcs[0];
6123 if (pmd_mask) pfm_restore_pmds(ctx->th_pmds, pmd_mask);
6124 if (pmc_mask) pfm_restore_pmcs(ctx->th_pmcs, pmc_mask);
6130 if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) {
6137 ctx->th_pmcs[0] = 0UL;
6150 ctx->ctx_reload_pmcs[0] = 0UL;
6151 ctx->ctx_reload_pmds[0] = 0UL;
6162 SET_ACTIVATION(ctx);
6167 SET_PMU_OWNER(task, ctx);
6175 if (likely(psr_up)) pfm_set_psr_up();
6180 pfm_unprotect_ctx_ctxsw(ctx, flags);
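/*
 * UP variant of the reload path: if the incoming task still owns the PMU its
 * registers were never saved, so monitoring is simply re-enabled via psr.up;
 * otherwise the current owner's state is lazily saved first and this
 * context's PMDs/PMCs are restored before ownership is taken over.
 */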
6192 unsigned long pmd_mask, pmc_mask;
6194 int need_irq_resend;
6196 owner = GET_PMU_OWNER();
6197 ctx = PFM_GET_CTX(task);
6198 psr = pfm_get_psr();
6211 if (ctx->ctx_fl_using_dbreg) {
6212 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
6213 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
6219 psr_up = ctx->ctx_saved_psr_up;
6220 need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;
6230 if (likely(owner == task)) {
6231 if (likely(psr_up)) pfm_set_psr_up();
6241 if (owner) pfm_lazy_save_regs(owner);
6249 pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];
6258 pmc_mask = ctx->ctx_all_pmcs[0];
6260 pfm_restore_pmds(ctx->th_pmds, pmd_mask);
6261 pfm_restore_pmcs(ctx->th_pmcs, pmc_mask);
6267 if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) {
6275 ctx->th_pmcs[0] = 0UL;
6288 SET_PMU_OWNER(task, ctx);
6296 if (likely(psr_up)) pfm_set_psr_up();
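/*
 * pfm_flush_pmds(): called when monitoring ends for a task; it folds the
 * current hardware (or saved thread-area) PMD values, plus any overflow still
 * latched in pmc0, into the 64-bit software counters in ctx_pmds[].val so
 * that user level reads final values.
 */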
6304 pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
6308 int i, can_access_pmu = 0;
6315 is_self = ctx->ctx_task == task ? 1 : 0;
6324 can_access_pmu = (GET_PMU_OWNER() == task) || (ctx->ctx_fl_system && ctx->ctx_cpu == smp_processor_id());
6325 if (can_access_pmu) {
6335 DPRINT(("releasing ownership\n"));
6350 pmc0 = ctx->th_pmcs[0];
6354 ctx->th_pmcs[0] = 0;
6356 ovfl_val = pmu_conf->ovfl_val;
6363 mask2 = ctx->ctx_used_pmds[0];
6365 DPRINT(("is_self=%d ovfl_val=0x%lx mask2=0x%lx\n", is_self, ovfl_val, mask2));
6367 for (i = 0; mask2; i++, mask2>>=1) {
6370 if ((mask2 & 0x1) == 0) continue;
6375 val = pmd_val = can_access_pmu ? ia64_get_pmd(i) : ctx->th_pmds[i];
6377 if (PMD_IS_COUNTING(i)) {
6378 DPRINT(("[%d] pmd[%d] ctx_pmd=0x%lx hw_pmd=0x%lx\n",
6381 ctx->ctx_pmds[i].val,
6387 val = ctx->ctx_pmds[i].val + (val & ovfl_val);
6399 if (pmc0 & (1UL << i)) {
6400 val += 1 + ovfl_val;
6401 DPRINT(("[%d] pmd[%d] overflowed\n", task_pid_nr(task), i));
6405 DPRINT(("[%d] ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task_pid_nr(task), i, val, pmd_val));
6407 if (is_self) ctx->th_pmds[i] = pmd_val;
6409 ctx->ctx_pmds[i].val = val;
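/*
 * Interrupt plumbing: perfmon_irqaction wires pfm_interrupt_handler to the
 * PMU vector. The pfm_alt_* entry points below let another kernel subsystem
 * temporarily take over the PMU interrupt: a system-wide session is reserved
 * per CPU, the PMU state is saved/restored on every CPU via on_each_cpu(),
 * and the handler pointer is swapped under pfm_alt_install_check.
 */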
6413 static struct irqaction perfmon_irqaction = {
6414 .handler = pfm_interrupt_handler,
6420 pfm_alt_save_pmu_state(void *data)
6446 pfm_alt_restore_pmu_state(void *data)
6480 if (pfm_alt_intr_handler) return -EBUSY;
6483 if (!spin_trylock(&pfm_alt_install_check)) {
6489 ret = pfm_reserve_session(NULL, 1, reserve_cpu);
6490 if (ret) goto cleanup_reserve;
6496 DPRINT(("on_each_cpu() failed: %d\n", ret));
6497 goto cleanup_reserve;
6501 pfm_alt_intr_handler = hdl;
6503 spin_unlock(&pfm_alt_install_check);
6510 if (i >= reserve_cpu) break;
6512 pfm_unreserve_session(NULL, 1, i);
6515 spin_unlock(&pfm_alt_install_check);
6530 if (pfm_alt_intr_handler != hdl) return -EINVAL;
6533 if (!spin_trylock(&pfm_alt_install_check)) {
6537 pfm_alt_intr_handler = NULL;
6541 DPRINT(("on_each_cpu() failed: %d\n", ret));
6545 pfm_unreserve_session(NULL, 1, i);
6548 spin_unlock(&pfm_alt_install_check);
6557 static int init_pfm_fs(void);
6570 if ((*p)->probe() == 0) goto found;
6571 } else if ((*p)->pmu_family == family || (*p)->pmu_family == 0xff) {
6583 .open = pfm_proc_open,
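/*
 * Boot-time initialization: probe for a matching PMU description (via its
 * probe() hook or the CPU family), build the impl_pmcs/impl_pmds bitmaps and
 * register counts from the PMC/PMD descriptor tables, sanity-check the
 * debug-register configuration when range restrictions are supported, create
 * /proc/perfmon and initialize the per-CPU statistics.
 */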
6592 unsigned int n, n_counters, i;
6594 printk("perfmon: version %u.%u IRQ %u\n",
6599 if (pfm_probe_pmu()) {
6600 printk(KERN_INFO "perfmon: disabled, there is no support for processor family %d\n",
6610 for (i=0; PMC_IS_LAST(i) == 0; i++) {
6611 if (PMC_IS_IMPL(i) == 0) continue;
6612 pmu_conf->impl_pmcs[i>>6] |= 1UL << (i&63);
6615 pmu_conf->num_pmcs = n;
6617 n = 0; n_counters = 0;
6618 for (i=0; PMD_IS_LAST(i) == 0; i++) {
6619 if (PMD_IS_IMPL(i) == 0) continue;
6620 pmu_conf->impl_pmds[i>>6] |= 1UL << (i&63);
6622 if (PMD_IS_COUNTING(i)) n_counters++;
6624 pmu_conf->num_pmds = n;
6625 pmu_conf->num_counters = n_counters;
6630 if (pmu_conf->use_rr_dbregs) {
6632 printk(KERN_INFO "perfmon: unsupported number of code debug registers (%u)\n", pmu_conf->num_ibrs);
6637 printk(KERN_INFO "perfmon: unsupported number of data debug registers (%u)\n", pmu_conf->num_dbrs);
6643 printk("perfmon: %s PMU detected, %u PMCs, %u PMDs, %u counters (%lu bits)\n",
6647 pmu_conf->num_counters,
6648 ffz(pmu_conf->ovfl_val));
6651 if (pmu_conf->num_pmds >= PFM_NUM_PMD_REGS || pmu_conf->num_pmcs >= PFM_NUM_PMC_REGS) {
6652 printk(KERN_ERR "perfmon: not enough pmc/pmd, perfmon disabled\n");
6660 perfmon_dir = proc_create("perfmon", S_IRUGO, NULL, &pfm_proc_fops);
6661 if (perfmon_dir == NULL) {
6662 printk(KERN_ERR "perfmon: cannot create /proc entry, perfmon disabled\n");
6680 for(i=0; i < NR_CPUS; i++) pfm_stats[i].pfm_ovfl_intr_cycles_min = ~0UL;
6693 static int first_time=1;
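/*
 * dump_pmu_state(): debugging aid that prints, for the current CPU, the PMU
 * owner and context, the psr/dcr monitoring bits, and every implemented
 * PMC/PMD value alongside the copies saved in the thread area.
 */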
6719 dump_pmu_state(const char *from)
6731 info = PFM_CPUINFO_GET();
6739 printk("CPU%d from %s() current [%d] iip=0x%lx %s\n",
6746 task = GET_PMU_OWNER();
6747 ctx = GET_PMU_CTX();
6749 printk("->CPU%d owner [%d] ctx=%p\n", this_cpu, task ? task_pid_nr(task) : -1, ctx);
6751 psr = pfm_get_psr();
6753 printk("->CPU%d pmc0=0x%lx psr.pp=%d psr.up=%d dcr.pp=%d syst_info=0x%lx user_psr.up=%d user_psr.pp=%d\n",
6757 psr & IA64_PSR_UP ? 1 : 0,
6758 dcr & IA64_DCR_PP ? 1 : 0,
6766 for (i=1; PMC_IS_LAST(i) == 0; i++) {
6767 if (PMC_IS_IMPL(i) == 0) continue;
6768 printk("->CPU%d pmc[%d]=0x%lx thread_pmc[%d]=0x%lx\n", this_cpu, i, ia64_get_pmc(i), i, ctx->th_pmcs[i]);
6771 for (i=1; PMD_IS_LAST(i) == 0; i++) {
6772 if (PMD_IS_IMPL(i) == 0) continue;
6773 printk("->CPU%d pmd[%d]=0x%lx thread_pmd[%d]=0x%lx\n", this_cpu, i, ia64_get_pmd(i), i, ctx->th_pmds[i]);
6777 printk("->CPU%d ctx_state=%d vaddr=%p addr=%p fd=%d ctx_task=[%d] saved_psr_up=0x%lx\n",
6780 ctx->ctx_smpl_vaddr,
6784 ctx->ctx_saved_psr_up);
6797 DPRINT(("perfmon: pfm_inherit clearing state for [%d]\n", task_pid_nr(task)));
6804 thread->pfm_context = NULL;
6806 PFM_SET_WORK_PENDING(task, 0);