#include <linux/export.h>
#include <linux/slab.h>
#include <asm/sections.h>
#include <asm/irq_regs.h>
#include <asm/ptrace.h>
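/*
 * Pending profile hits are buffered in per-CPU open-addressed hash
 * tables; each table is one page of struct profile_hit entries,
 * probed in groups of PROFILE_GRPSZ slots.
 */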
struct profile_hit {
	u32 pc, hits;
};
#define PROFILE_GRPSHIFT 3
#define PROFILE_GRPSZ (1 << PROFILE_GRPSHIFT)
#define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
#define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
static atomic_t *prof_buffer;
static unsigned long prof_len, prof_shift;

int prof_on __read_mostly;
EXPORT_SYMBOL_GPL(prof_on);

static cpumask_var_t prof_cpu_mask;
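/*
 * SMP builds keep two hit tables per CPU plus a flip index, so one
 * table can be drained while the other absorbs new hits (see the
 * flip/flush helpers below).
 */
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits);
static DEFINE_PER_CPU(int, cpu_profile_flip);
static DEFINE_MUTEX(profile_flip_mutex);
#endif /* CONFIG_SMP */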
int profile_setup(char *str)
{
	static char schedstr[] = "schedule";
	static char sleepstr[] = "sleep";
	static char kvmstr[] = "kvm";
	int par;

	if (!strncmp(str, sleepstr, strlen(sleepstr))) {
#ifdef CONFIG_SCHEDSTATS
		prof_on = SLEEP_PROFILING;
		if (str[strlen(sleepstr)] == ',')
			str += strlen(sleepstr) + 1;
		if (get_option(&str, &par))
			prof_shift = par;
		printk(KERN_INFO
			"kernel sleep profiling enabled (shift: %ld)\n",
			prof_shift);
#else
		printk(KERN_WARNING
			"kernel sleep profiling requires CONFIG_SCHEDSTATS\n");
#endif /* CONFIG_SCHEDSTATS */
	} else if (!strncmp(str, schedstr, strlen(schedstr))) {
		prof_on = SCHED_PROFILING;
		if (str[strlen(schedstr)] == ',')
			str += strlen(schedstr) + 1;
		if (get_option(&str, &par))
			prof_shift = par;
		printk(KERN_INFO
			"kernel schedule profiling enabled (shift: %ld)\n",
			prof_shift);
	} else if (!strncmp(str, kvmstr, strlen(kvmstr))) {
		prof_on = KVM_PROFILING;
		if (str[strlen(kvmstr)] == ',')
			str += strlen(kvmstr) + 1;
		if (get_option(&str, &par))
			prof_shift = par;
		printk(KERN_INFO
			"kernel KVM profiling enabled (shift: %ld)\n",
			prof_shift);
	} else if (get_option(&str, &par)) {
		prof_shift = par;
		prof_on = CPU_PROFILING;
		printk(KERN_INFO "kernel profiling enabled (shift: %ld)\n",
			prof_shift);
	}
	return 1;
}
__setup("profile=", profile_setup);
int __ref profile_init(void)
{
	int buffer_bytes;
	if (!prof_on)
		return 0;
	/* only text is profiled */
	prof_len = (_etext - _stext) >> prof_shift;
	buffer_bytes = prof_len*sizeof(atomic_t);
	if (!alloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL))
		return -ENOMEM;
	prof_buffer = vzalloc(buffer_bytes);
	if (prof_buffer)
		return 0;
	free_cpumask_var(prof_cpu_mask);
	return -ENOMEM;
}
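/*
 * Profile event notifications: task-exit and munmap events travel
 * blocking notifier chains, task-free handoff an atomic chain. In
 * profile_handoff_task(), the chain's verdict decides who frees the
 * task_struct:
 */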
int profile_handoff_task(struct task_struct *task)
{
	int ret;
	ret = atomic_notifier_call_chain(&task_free_notifier, 0, task);
	return (ret == NOTIFY_OK) ? 1 : 0;
}
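/*
 * profile_event_register()/profile_event_unregister() choose the
 * chain by enum profile_type, PROFILE_TASK_EXIT or PROFILE_MUNMAP:
 */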
		/* register: */
		err = blocking_notifier_chain_register(&task_exit_notifier, n);
		err = blocking_notifier_chain_register(&munmap_notifier, n);
		/* unregister: */
		err = blocking_notifier_chain_unregister(&task_exit_notifier, n);
		err = blocking_notifier_chain_unregister(&munmap_notifier, n);
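#ifdef CONFIG_SMP
/*
 * Each CPU owns a pair of open-addressed hash tables for pending
 * profile hits. read_profile() IPIs all CPUs to flip buffers and
 * flushes the old table's contents into prof_buffer itself; flips
 * are serialized by profile_flip_mutex. The second table avoids
 * cacheline contention while a flush of pending hits is in flight.
 */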
static void __profile_flip_buffers(void *unused)
{
	int cpu = smp_processor_id();

	per_cpu(cpu_profile_flip, cpu) = !per_cpu(cpu_profile_flip, cpu);
}

/* profile_flip_buffers() then drains each entry of the old table: */
			atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
			hits[i].hits = hits[i].pc = 0;
static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	unsigned long primary, secondary, flags, pc = (unsigned long)__pc;
	int i, j, cpu;
	struct profile_hit *hits;

	pc = min((pc - (unsigned long)_stext) >> prof_shift, prof_len - 1);
	i = primary = (pc & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
	secondary = (~(pc << 1) & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
	cpu = get_cpu();
	hits = per_cpu(cpu_profile_hits, cpu)[per_cpu(cpu_profile_flip, cpu)];
	if (!hits) {
		put_cpu();
		return;
	}
	local_irq_save(flags);
	do {
		for (j = 0; j < PROFILE_GRPSZ; ++j) {
			if (hits[i + j].pc == pc) {
				hits[i + j].hits += nr_hits;
				goto out;
			} else if (!hits[i + j].hits) {
				hits[i + j].pc = pc;
				hits[i + j].hits = nr_hits;
				goto out;
			}
		}
		i = (i + secondary) & (NR_PROFILE_HIT - 1);
	} while (i != primary);

	/*
	 * Add the current hit(s) and flush the write-queue out
	 * to the global buffer:
	 */
	atomic_add(nr_hits, &prof_buffer[pc]);
	for (i = 0; i < NR_PROFILE_HIT; ++i) {
		atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
		hits[i].pc = hits[i].hits = 0;
	}
out:
	local_irq_restore(flags);
	put_cpu();
}
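/*
 * CPU hotplug callback: on CPU_UP_PREPARE both hit pages are
 * allocated on the incoming CPU's node; CPU_ONLINE adds the CPU to
 * prof_cpu_mask; cancel/death clears the mask bit and frees both
 * pages.
 */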
static int profile_cpu_callback(struct notifier_block *info,
					unsigned long action, void *__cpu)
{
	int node, cpu = (unsigned long)__cpu;
	struct page *page;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		node = cpu_to_mem(cpu);
		per_cpu(cpu_profile_flip, cpu) = 0;
		if (!per_cpu(cpu_profile_hits, cpu)[1]) {
			page = alloc_pages_exact_node(node,
					GFP_KERNEL | __GFP_ZERO, 0);
			if (!page)
				return notifier_from_errno(-ENOMEM);
			per_cpu(cpu_profile_hits, cpu)[1] = page_address(page);
		}
		if (!per_cpu(cpu_profile_hits, cpu)[0]) {
			page = alloc_pages_exact_node(node,
					GFP_KERNEL | __GFP_ZERO, 0);
			if (!page)
				goto out_free;
			per_cpu(cpu_profile_hits, cpu)[0] = page_address(page);
		}
		break;
	out_free:
		page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
		per_cpu(cpu_profile_hits, cpu)[1] = NULL;
		__free_page(page);
		return notifier_from_errno(-ENOMEM);
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		if (prof_cpu_mask != NULL)
			cpumask_set_cpu(cpu, prof_cpu_mask);
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		if (prof_cpu_mask != NULL)
			cpumask_clear_cpu(cpu, prof_cpu_mask);
		if (per_cpu(cpu_profile_hits, cpu)[0]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
			per_cpu(cpu_profile_hits, cpu)[0] = NULL;
			__free_page(page);
		}
		if (per_cpu(cpu_profile_hits, cpu)[1]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
			per_cpu(cpu_profile_hits, cpu)[1] = NULL;
			__free_page(page);
		}
		break;
	}
	return NOTIFY_OK;
}
#else /* !CONFIG_SMP */
#define profile_flip_buffers()		do { } while (0)
#define profile_discard_flip_buffers()	do { } while (0)
#define profile_cpu_callback		NULL
static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	unsigned long pc;

	pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
	atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
}
#endif /* !CONFIG_SMP */
void profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	if (prof_on != type || !prof_buffer)
		return;
	do_profile_hits(type, __pc, nr_hits);
}
EXPORT_SYMBOL_GPL(profile_hits);
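/*
 * The usual single-hit entry point is profile_hit() from
 * <linux/profile.h>, an inline wrapper that calls profile_hits()
 * with nr_hits == 1.
 */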
#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <asm/uaccess.h>
static int prof_cpu_mask_proc_show(struct seq_file *m, void *v)
{
	seq_cpumask(m, prof_cpu_mask);
	seq_putc(m, '\n');
	return 0;
}

static int prof_cpu_mask_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, prof_cpu_mask_proc_show, NULL);
}
static ssize_t prof_cpu_mask_proc_write(struct file *file,
	const char __user *buffer, size_t count, loff_t *pos)
{
	cpumask_var_t new_value;
	int err;

	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;
	err = cpumask_parse_user(buffer, count, new_value);
	if (!err) {
		cpumask_copy(prof_cpu_mask, new_value);
		err = count;
	}
	free_cpumask_var(new_value);
	return err;
}
static const struct file_operations prof_cpu_mask_proc_fops = {
	.open		= prof_cpu_mask_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= prof_cpu_mask_proc_write,
};
void create_prof_cpu_mask(struct proc_dir_entry *root_irq_dir)
{
	/* create /proc/irq/prof_cpu_mask */
	proc_create("prof_cpu_mask", 0600, root_irq_dir,
		    &prof_cpu_mask_proc_fops);
}
/*
 * This function accesses profiling information. The returned data is
 * binary: the sampling step and the actual contents of the profile
 * buffer. Use of the program readprofile is recommended in order to
 * get meaningful info out of these data.
 */
static ssize_t
read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read;
	char *pnt;
	unsigned int sample_step = 1 << prof_shift;

	profile_flip_buffers();
	if (p >= (prof_len+1)*sizeof(unsigned int))
		return 0;
	if (count > (prof_len+1)*sizeof(unsigned int) - p)
		count = (prof_len+1)*sizeof(unsigned int) - p;
	read = 0;

	while (p < sizeof(unsigned int) && count > 0) {
		if (put_user(*((char *)(&sample_step)+p), buf))
			return -EFAULT;
		buf++; p++; count--; read++;
	}
	pnt = (char *)prof_buffer + p - sizeof(atomic_t);
	if (copy_to_user(buf, (void *)pnt, count))
		return -EFAULT;
	read += count;
	*ppos += read;
	return read;
}
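/*
 * Example: readprofile(8) consumes this layout and maps the counters
 * back to kernel symbols, e.g.:
 *
 *	readprofile -m /boot/System.map
 */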
/*
 * Writing to /proc/profile resets the counters.
 *
 * Writing a 'profiling multiplier' value into it also re-sets the
 * profiling interrupt frequency, on architectures that support this.
 */
static ssize_t write_profile(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
#ifdef CONFIG_SMP
	extern int setup_profiling_timer(unsigned int multiplier);

	if (count == sizeof(int)) {
		unsigned int multiplier;

		if (copy_from_user(&multiplier, buf, sizeof(int)))
			return -EFAULT;
		if (setup_profiling_timer(multiplier))
			return -EINVAL;
	}
#endif
	profile_discard_flip_buffers();
	memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
	return count;
}
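/*
 * Example: resetting the counters from userspace; any write works,
 * e.g.:
 *
 *	echo > /proc/profile
 */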
static const struct file_operations proc_profile_operations = {
	.read		= read_profile,
	.write		= write_profile,
	.llseek		= default_llseek,
};
#ifdef CONFIG_SMP
static void profile_nop(void *unused)
{
}
static int create_hash_tables(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		int node = cpu_to_mem(cpu);
		struct page *page;

		page = alloc_pages_exact_node(node,
				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE, 0);
		if (!page)
			goto out_cleanup;
		per_cpu(cpu_profile_hits, cpu)[1]
				= (struct profile_hit *)page_address(page);
		page = alloc_pages_exact_node(node,
				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE, 0);
		if (!page)
			goto out_cleanup;
		per_cpu(cpu_profile_hits, cpu)[0] = page_address(page);
	}
	return 0;
out_cleanup:
	prof_on = 0;
	smp_mb();
	on_each_cpu(profile_nop, NULL, 1);
	for_each_online_cpu(cpu) {
		struct page *page;

		if (per_cpu(cpu_profile_hits, cpu)[0]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
			per_cpu(cpu_profile_hits, cpu)[0] = NULL;
			__free_page(page);
		}
		if (per_cpu(cpu_profile_hits, cpu)[1]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
			per_cpu(cpu_profile_hits, cpu)[1] = NULL;
			__free_page(page);
		}
	}
	return -1;
}
#else
#define create_hash_tables()			({ 0; })
#endif
int __ref create_proc_profile(void)
{
	struct proc_dir_entry *entry;

	if (!prof_on)
		return 0;
	if (create_hash_tables())
		return -ENOMEM;
	entry = proc_create("profile", S_IWUSR | S_IRUGO,
			    NULL, &proc_profile_operations);
	if (!entry)
		return 0;
	hotcpu_notifier(profile_cpu_callback, 0);
	return 0;
}
module_init(create_proc_profile);