#include <generated/utsrelease.h>
#include <linux/linkage.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/poll.h>
static struct tracer_opt dummy_tracer_opt[] = {
.opts = dummy_tracer_opt
static int dummy_set_flag(u32 old_flags, u32 bit, int set)
static int tracing_disabled = 1;
static int tracing_set_tracer(const char *buf);
#define MAX_TRACER_SIZE 100
static char *default_bootup_tracer;
static int __init set_cmdline_ftrace(char *str)
default_bootup_tracer = bootup_tracer_buf;
__setup("ftrace=", set_cmdline_ftrace);
static int __init set_ftrace_dump_on_oops(char *str)
if (*str++ != '=' || !*str) {
if (!strcmp("orig_cpu", str)) {
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
return filter_check_discard(call, rec, buffer, event);
if (!global_trace.buffer)
static int tracer_enabled = 1;
return tracer_enabled;
#define TRACE_BUF_SIZE_DEFAULT 1441792UL
static inline void trace_access_lock(int cpu)
static inline void trace_access_unlock(int cpu)
static inline void trace_access_lock_init(void)
static inline void trace_access_lock(int cpu)
static inline void trace_access_unlock(int cpu)
static inline void trace_access_lock_init(void)
static int trace_stop_count;
if (global_trace.buffer)
global_trace.buffer_disabled = 0;
if (global_trace.buffer)
global_trace.buffer_disabled = 1;
if (global_trace.buffer)
return !global_trace.buffer_disabled;
static int __init set_buf_size(char *str)
__setup("trace_buf_size=", set_buf_size);
static int __init set_tracing_thresh(char *str)
__setup("tracing_thresh=", set_tracing_thresh);
static const char *trace_options[] = {
memset(parser, 0, sizeof(*parser));
size_t cnt, loff_t *ppos)
trace_parser_clear(parser);
if (parser->idx < parser->size - 1)
parser->cont = false;
#ifdef CONFIG_TRACER_MAX_TRACE
unsigned long __read_mostly tracing_max_latency;
max_data = max_tr.data[cpu];
if (trace_stop_count)
if (!current_trace->use_max_tr) {
tr->buffer = max_tr.buffer;
__update_max_tr(tr, tsk, cpu);
if (trace_stop_count)
if (!current_trace->use_max_tr) {
ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);
"Failed to swap buffers due to commit in progress\n");
__update_max_tr(tr, tsk, cpu);
pr_info("Tracer must have a name\n");
tracing_selftest_running = true;
for (t = trace_types; t; t = t->next) {
pr_info("Tracer %s already registered\n",
type->flags = &dummy_tracer_flags;
if (!type->flags->opts)
type->flags->opts = dummy_tracer_opt;
#ifdef CONFIG_FTRACE_STARTUP_TEST
struct tracer *saved_tracer = current_trace;
current_trace = type;
ret = type->selftest(type, tr);
current_trace = saved_tracer;
type->next = trace_types;
tracing_selftest_running = false;
if (ret || !default_bootup_tracer)
tracing_set_tracer(type->name);
default_bootup_tracer = NULL;
#ifdef CONFIG_FTRACE_STARTUP_TEST
printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
for (t = &trace_types; *t; t = &(*t)->next) {
if (type == current_trace && tracer_enabled) {
if (current_trace->stop)
current_trace->stop(&global_trace);
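/*
 * Sketch (not part of this file): the minimal shape of a tracer that
 * register_tracer() above would accept. Only .name is mandatory; with no
 * .flags set, the dummy_tracer_flags/dummy_tracer_opt fallbacks above are
 * wired in. The callback names here are hypothetical.
 */
#if 0
static int example_tracer_init(struct trace_array *tr)
{
	return 0;	/* start recording */
}

static void example_tracer_reset(struct trace_array *tr)
{
	/* stop recording, free any private state */
}

static struct tracer example_tracer __read_mostly = {
	.name	= "example",
	.init	= example_tracer_init,
	.reset	= example_tracer_reset,
};

static __init int init_example_tracer(void)
{
	return register_tracer(&example_tracer);
}
device_initcall(init_example_tracer);
#endif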
#define SAVED_CMDLINES 128
#define NO_CMDLINE_MAP UINT_MAX
static int cmdline_idx;
static void trace_init_cmdlines(void)
return trace_stop_count;
tracing_disabled = 1;
if (tracing_disabled)
if (--trace_stop_count) {
if (trace_stop_count < 0) {
trace_stop_count = 0;
buffer = global_trace.buffer;
buffer = max_tr.buffer;
unsigned long flags;
if (trace_stop_count++)
buffer = global_trace.buffer;
buffer = max_tr.buffer;
static void trace_save_cmdline(struct task_struct *tsk)
idx = map_pid_to_cmdline[tsk->pid];
pid = map_cmdline_to_pid[idx];
map_cmdline_to_pid[idx] = tsk->pid;
map_pid_to_cmdline[tsk->pid] = idx;
map = map_pid_to_cmdline[pid];
strcpy(comm, saved_cmdlines[map]);
if (atomic_read(&trace_record_cmdline_disabled) || !tracer_enabled ||
trace_save_cmdline(tsk);
entry->pid = (tsk) ? tsk->pid : 0;
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
if (event != NULL) {
ftrace_trace_stack(buffer, flags, 6, pc);
ftrace_trace_userstack(buffer, flags, pc);
unsigned long flags, int pc)
__trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
int type, unsigned long len,
unsigned long flags, int pc)
*current_rb = global_trace.buffer;
type, len, flags, pc);
unsigned long flags, int pc)
__trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
unsigned long flags, int pc)
__trace_buffer_unlock_commit(buffer, event, flags, pc, 0);
unsigned long flags, int pc,
ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
ftrace_trace_userstack(buffer, flags, pc);
unsigned long ip, unsigned long parent_ip, unsigned long flags,
struct ftrace_entry *entry;
entry->parent_ip = parent_ip;
if (!filter_check_discard(call, entry, buffer, event))
unsigned long ip, unsigned long parent_ip, unsigned long flags,
#ifdef CONFIG_STACKTRACE
#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
struct ftrace_stack {
unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
static void __ftrace_trace_stack(struct ring_buffer *buffer,
unsigned long flags,
struct stack_entry *entry;
struct stack_trace trace;
trace.nr_entries = 0;
if (use_stack == 1) {
trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
if (trace.nr_entries > size)
size = trace.nr_entries;
size *= sizeof(unsigned long);
sizeof(*entry) + size, flags, pc);
memset(&entry->caller, 0, size);
trace.nr_entries * sizeof(unsigned long));
trace.entries = entry->caller;
entry->size = trace.nr_entries;
if (!filter_check_discard(call, entry, buffer, event))
void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
int skip, int pc, struct pt_regs *regs)
__ftrace_trace_stack(buffer, flags, skip, pc, regs);
void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
__ftrace_trace_stack(tr->buffer, flags, skip, pc, NULL);
void trace_dump_stack(void)
unsigned long flags;
if (tracing_disabled || tracing_selftest_running)
ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
struct userstack_entry *entry;
struct stack_trace trace;
sizeof(*entry), flags, pc);
goto out_drop_count;
memset(&entry->caller, 0, sizeof(entry->caller));
trace.nr_entries = 0;
trace.entries = entry->caller;
if (!filter_check_discard(call, entry, buffer, event))
static void __trace_userstack(struct trace_array *tr, unsigned long flags)
static char *get_trace_buf(void)
percpu_buffer = trace_percpu_nmi_buffer;
percpu_buffer = trace_percpu_irq_buffer;
percpu_buffer = trace_percpu_sirq_buffer;
percpu_buffer = trace_percpu_buffer;
static int alloc_percpu_trace_buffer(void)
trace_percpu_buffer = buffers;
trace_percpu_sirq_buffer = sirq_buffers;
trace_percpu_irq_buffer = irq_buffers;
trace_percpu_nmi_buffer = nmi_buffers;
WARN(1, "Could not allocate percpu trace_printk buffer");
static int buffers_allocated;
if (buffers_allocated)
if (alloc_percpu_trace_buffer())
pr_info("ftrace: Allocated trace_printk buffers\n");
buffers_allocated = 1;
struct bprint_entry *entry;
unsigned long flags;
if (unlikely(tracing_selftest_running || tracing_disabled))
pause_graph_tracing();
tbuffer = get_trace_buf();
size = sizeof(*entry) + sizeof(u32) * len;
memcpy(entry->buf, tbuffer, sizeof(u32) * len);
if (!filter_check_discard(call, entry, buffer, event)) {
ftrace_trace_stack(buffer, flags, 6, pc);
unpause_graph_tracing();
unsigned long ip, const char *fmt, ...)
struct print_entry *entry;
unsigned long flags;
if (tracing_disabled || tracing_selftest_running)
pause_graph_tracing();
tbuffer = get_trace_buf();
size = sizeof(*entry) + len + 1;
memcpy(&entry->buf, tbuffer, len);
entry->buf[len] = '\0';
if (!filter_check_discard(call, entry, buffer, event)) {
ftrace_trace_stack(buffer, flags, 6, pc);
unpause_graph_tracing();
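/*
 * Usage sketch (illustrative): the percpu buffers set up above back
 * trace_printk(), which is safe from any context, including NMI:
 *
 *	trace_printk("hit slow path: cpu=%d\n", smp_processor_id());
 *
 * Constant format strings go through the bprint path (trace_vbprintk),
 * which records only the format pointer plus the binary arguments;
 * runtime-built formats fall back to the full trace_vprintk() above.
 */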
static void trace_iterator_increment(struct trace_iterator *iter)
unsigned long *lost_events)
unsigned long *missing_events, u64 *ent_ts)
unsigned long lost_events = 0, next_lost = 0;
u64 next_ts = 0, ts;
ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
*ent_cpu = cpu_file;
ent = peek_next_entry(iter, cpu, &ts, &lost_events);
if (ent && (!next || ts < next_ts)) {
next_lost = lost_events;
*missing_events = next_lost;
int *ent_cpu, u64 *ent_ts)
return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
iter->ent = __find_next_entry(iter, &iter->cpu,
trace_iterator_increment(iter);
return iter->ent ? iter : NULL;
static void *s_next(struct seq_file *m, void *v, loff_t *pos)
while (ent && iter->idx < i)
tr->data[cpu]->skipped_entries = 0;
buf_iter = trace_buffer_iter(iter, cpu);
if (ts >= iter->tr->time_start)
static void *s_start(struct seq_file *m, loff_t *pos)
static struct tracer *old_tracer;
if (unlikely(old_tracer != current_trace && current_trace)) {
old_tracer = current_trace;
*iter->trace = *current_trace;
if (*pos != iter->pos) {
for (p = iter; p && l < *pos; p = s_next(m, p, &l))
p = s_next(m, p, &l);
trace_access_lock(cpu_file);
static void s_stop(struct seq_file *m, void *p)
trace_access_unlock(iter->cpu_file);
get_total_entries(struct trace_array *tr, unsigned long *total, unsigned long *entries)
unsigned long count;
if (tr->data[cpu]->skipped_entries) {
count -= tr->data[cpu]->skipped_entries;
static void print_lat_help_header(struct seq_file *m)
seq_puts(m, "# _------=> CPU# \n");
seq_puts(m, "# / _-----=> irqs-off \n");
seq_puts(m, "# | / _----=> need-resched \n");
seq_puts(m, "# || / _---=> hardirq/softirq \n");
seq_puts(m, "# ||| / _--=> preempt-depth \n");
seq_puts(m, "# cmd pid ||||| time | caller \n");
seq_puts(m, "# \\ / ||||| \\ | / \n");
unsigned long total;
get_total_entries(tr, &total, &entries);
seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
print_event_info(tr, m);
seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n");
print_event_info(tr, m);
seq_puts(m, "# _-----=> irqs-off\n");
seq_puts(m, "# / _----=> need-resched\n");
seq_puts(m, "# | / _---=> hardirq/softirq\n");
seq_puts(m, "# || / _--=> preempt-depth\n");
seq_puts(m, "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n");
unsigned long total;
const char *name = "preemption";
get_total_entries(tr, &total, &entries);
seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
seq_puts(m, "# -----------------------------------" "---------------------------------\n");
seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |" " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
#if defined(CONFIG_PREEMPT_NONE)
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
#elif defined(CONFIG_PREEMPT)
seq_puts(m, "# -----------------\n");
"(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
seq_puts(m, "# -----------------\n");
if (iter->tr->data[iter->cpu]->skipped_entries)
test_cpu_buff_start(iter);
return event->funcs->trace(iter, sym_flags, event);
if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
return event->funcs->raw(iter, 0, event);
unsigned char newline = '\n';
if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
enum print_line_t ret = event->funcs->hex(iter, 0, event);
if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
return event ? event->funcs->binary(iter, 0, event) :
buf_iter = trace_buffer_iter(iter, cpu);
buf_iter = trace_buffer_iter(iter, cpu);
if (iter->trace && iter->trace->print_line) {
ret = iter->trace->print_line(iter);
trace_flags & TRACE_ITER_PRINTK_MSGONLY)
return print_bin_fmt(iter);
return print_hex_fmt(iter);
return print_raw_fmt(iter);
return print_trace_fmt(iter);
print_lat_help_header(m);
if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
print_lat_help_header(m);
print_func_help_header_irq(iter->tr, m);
print_func_help_header(iter->tr, m);
static void test_ftrace_alive(struct seq_file *m)
seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
seq_printf(m, "# MAY BE MISSING FUNCTION EVENTS\n");
static int s_show(struct seq_file *m, void *v)
test_ftrace_alive(m);
if (iter->trace && iter->trace->print_header)
iter->trace->print_header(m);
if (tracing_disabled)
*iter->trace = *current_trace;
if (current_trace && current_trace->print_max)
iter->tr = &global_trace;
iter->trace->open(iter);
if (tracing_disabled)
static int tracing_release(struct inode *inode, struct file *file)
iter->trace->close(iter);
free_cpumask_var(iter->started);
static int tracing_open(struct inode *inode, struct file *file)
iter = __tracing_open(inode, file);
ret = PTR_ERR(iter);
t_next(struct seq_file *m, void *v, loff_t *pos)
static void *t_start(struct seq_file *m, loff_t *pos)
for (t = trace_types; t && l < *pos; t = t_next(m, t, &l))
static void t_stop(struct seq_file *m, void *p)
static int t_show(struct seq_file *m, void *v)
static int show_traces_open(struct inode *inode, struct file *file)
if (tracing_disabled)
return seq_open(file, &show_traces_seq_ops);
tracing_write_stub(struct file *filp, const char __user *ubuf,
size_t count, loff_t *ppos)
static loff_t tracing_seek(struct file *file, loff_t offset, int origin)
.open = tracing_open,
.write = tracing_write_stub,
.llseek = tracing_seek,
.release = tracing_release,
.open = show_traces_open,
static char mask_str[NR_CPUS + 1];
tracing_cpumask_read(struct file *filp, char __user *ubuf,
size_t count, loff_t *ppos)
len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
if (count - len < 2) {
len += sprintf(mask_str + len, "\n");
tracing_cpumask_write(struct file *filp, const char __user *ubuf,
size_t count, loff_t *ppos)
if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
atomic_inc(&global_trace.data[cpu]->disabled);
atomic_dec(&global_trace.data[cpu]->disabled);
cpumask_copy(tracing_cpumask, tracing_cpumask_new);
free_cpumask_var(tracing_cpumask_new);
free_cpumask_var(tracing_cpumask_new);
.read = tracing_cpumask_read,
.write = tracing_cpumask_write,
static int tracing_trace_options_show(struct seq_file *m, void *v)
tracer_flags = current_trace->flags->val;
trace_opts = current_trace->flags->opts;
for (i = 0; trace_options[i]; i++) {
if (trace_flags & (1 << i))
for (i = 0; trace_opts[i].name; i++) {
if (tracer_flags & trace_opts[i].bit)
static int __set_tracer_option(struct tracer *trace,
struct tracer_flags *tracer_flags,
tracer_flags->val &= ~opts->bit;
tracer_flags->val |= opts->bit;
static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
struct tracer_flags *tracer_flags = trace->flags;
for (i = 0; tracer_flags->opts[i].name; i++) {
opts = &tracer_flags->opts[i];
return __set_tracer_option(trace, trace->flags,
static void set_tracer_flags(unsigned int mask, int enabled)
if (!!(trace_flags & mask) == !!enabled)
trace_flags |= mask;
trace_flags &= ~mask;
tracing_trace_options_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
if (cnt >= sizeof(buf))
cmp = strstrip(buf);
if (strncmp(cmp, "no", 2) == 0) {
for (i = 0; trace_options[i]; i++) {
if (strcmp(cmp, trace_options[i]) == 0) {
set_tracer_flags(1 << i, !neg);
if (!trace_options[i]) {
ret = set_tracer_option(current_trace, cmp, neg);
static int tracing_trace_options_open(struct inode *inode, struct file *file)
if (tracing_disabled)
.open = tracing_trace_options_open,
.write = tracing_trace_options_write,
static const char readme_msg[] =
"tracing mini-HOWTO:\n\n"
"# mount -t debugfs nodev /sys/kernel/debug\n\n"
"# cat /sys/kernel/debug/tracing/available_tracers\n"
"wakeup wakeup_rt preemptirqsoff preemptoff irqsoff function nop\n\n"
"# cat /sys/kernel/debug/tracing/current_tracer\n"
"# echo wakeup > /sys/kernel/debug/tracing/current_tracer\n"
"# cat /sys/kernel/debug/tracing/current_tracer\n"
"# cat /sys/kernel/debug/tracing/trace_options\n"
"noprint-parent nosym-offset nosym-addr noverbose\n"
"# echo print-parent > /sys/kernel/debug/tracing/trace_options\n"
"# echo 1 > /sys/kernel/debug/tracing/tracing_on\n"
"# cat /sys/kernel/debug/tracing/trace > /tmp/trace.txt\n"
"# echo 0 > /sys/kernel/debug/tracing/tracing_on\n"
tracing_readme_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
readme_msg, strlen(readme_msg));
.read = tracing_readme_read,
tracing_saved_cmdlines_read(struct file *file, char __user *ubuf,
size_t cnt, loff_t *ppos)
pid = map_cmdline_to_pid[i];
r = sprintf(buf, "%d %s\n", pid, buf_comm);
.read = tracing_saved_cmdlines_read,
tracing_ctrl_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
r = sprintf(buf, "%u\n", tracer_enabled);
tracing_ctrl_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
if (tracer_enabled ^ val) {
WARN_ONCE(1, "tracing_enabled is deprecated. Use tracing_on");
if (current_trace->start)
current_trace->start(tr);
if (current_trace->stop)
current_trace->stop(tr);
tracing_set_trace_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
r = sprintf(buf, "%s\n", current_trace->name);
static void set_buffer_entries(struct trace_array *tr, unsigned long val)
tr->data[cpu]->entries = val;
static int __tracing_resize_ring_buffer(unsigned long size, int cpu)
if (!current_trace->use_max_tr)
global_trace.data[i]->entries,
global_trace.data[cpu]->entries,
tracing_disabled = 1;
set_buffer_entries(&max_tr, size);
max_tr.data[cpu]->entries = size;
set_buffer_entries(&global_trace, size);
global_trace.data[cpu]->entries = size;
static ssize_t tracing_resize_ring_buffer(unsigned long size, int cpu_id)
ret = __tracing_resize_ring_buffer(size, cpu_id);
ret = __tracing_resize_ring_buffer(trace_buf_size,
static int tracing_set_tracer(const char *buf)
ret = __tracing_resize_ring_buffer(trace_buf_size,
for (t = trace_types; t; t = t->next) {
if (t == current_trace)
trace_branch_disable();
if (current_trace && current_trace->reset)
current_trace->reset(tr);
if (current_trace && current_trace->use_max_tr) {
set_buffer_entries(&max_tr, 1);
destroy_trace_option_files(topts);
topts = create_trace_option_files(t);
global_trace.data[cpu]->entries,
max_tr.data[cpu]->entries = global_trace.data[cpu]->entries;
trace_branch_enable(tr);
tracing_set_trace_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
err = tracing_set_tracer(buf);
tracing_max_lat_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
r = snprintf(buf, sizeof(buf), "%ld\n",
if (r > sizeof(buf))
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
static int tracing_open_pipe(struct inode *inode, struct file *filp)
if (tracing_disabled)
*iter->trace = *current_trace;
cpumask_setall(iter->started);
iter->tr = &global_trace;
if (iter->trace->pipe_open)
iter->trace->pipe_open(iter);
static int tracing_release_pipe(struct inode *inode, struct file *file)
if (iter->trace->pipe_close)
iter->trace->pipe_close(iter);
free_cpumask_var(iter->started);
poll_wait(filp, &trace_wait, poll_table);
static int tracing_wait_pipe(struct file *filp)
iter->trace->wait_pipe(iter);
if (!tracer_enabled && iter->pos)
tracing_read_pipe(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
static struct tracer *old_tracer;
if (unlikely(old_tracer != current_trace && current_trace)) {
old_tracer = current_trace;
*iter->trace = *current_trace;
if (iter->trace->read) {
sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
sret = tracing_wait_pipe(filp);
int len = iter->seq.len;
iter->seq.len = len;
trace_consume(iter);
if (iter->seq.len >= cnt)
WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
trace_access_unlock(iter->cpu_file);
if (iter->seq.readpos >= iter->seq.len)
.release = tracing_pipe_buf_release,
count = iter->seq.len;
trace_consume(iter);
static ssize_t tracing_splice_read_pipe(struct file *filp,
.partial = partial_def,
.ops = &tracing_pipe_buf_ops,
.spd_release = tracing_spd_release_pipe,
static struct tracer *old_tracer;
if (unlikely(old_tracer != current_trace && current_trace)) {
old_tracer = current_trace;
*iter->trace = *current_trace;
if (iter->trace->splice_read) {
ret = iter->trace->splice_read(iter, filp, ppos, pipe, len, flags);
ret = tracing_wait_pipe(filp);
for (i = 0, rem = len; i < pipe->buffers && rem; i++) {
rem = tracing_fill_pipe_page(rem, iter);
ret = trace_seq_to_buffer(&iter->seq,
trace_access_unlock(iter->cpu_file);
static int tracing_entries_open(struct inode *inode, struct file *filp)
if (tracing_disabled)
info->tr = &global_trace;
tracing_entries_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
int cpu, buf_size_same;
size = tr->data[cpu]->entries;
if (size != tr->data[cpu]->entries) {
if (buf_size_same) {
r = sprintf(buf, "%lu (expanded: %lu)\n",
trace_buf_size >> 10);
r = sprintf(buf, "%lu\n", size >> 10);
tracing_entries_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
ret = tracing_resize_ring_buffer(val, info->cpu);
tracing_entries_release(struct inode *inode, struct file *filp)
tracing_total_entries_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
unsigned long size = 0, expanded_size = 0;
size += tr->data[cpu]->entries >> 10;
expanded_size += trace_buf_size >> 10;
r = sprintf(buf, "%lu\n", size);
r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
tracing_free_buffer_release(struct inode *inode, struct file *filp)
tracing_mark_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *fpos)
unsigned long addr = (unsigned long)ubuf;
struct print_entry *entry;
unsigned long irq_flags;
if (tracing_disabled)
if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
if (ret < nr_pages) {
for (i = 0; i < nr_pages; i++)
size = sizeof(*entry) + cnt + 2;
buffer = global_trace.buffer;
if (nr_pages == 2) {
memcpy(&entry->buf, map_page[0] + offset, len);
memcpy(&entry->buf[len], map_page[1], cnt - len);
memcpy(&entry->buf, map_page[0] + offset, cnt);
if (entry->buf[cnt - 1] != '\n') {
entry->buf[cnt] = '\n';
entry->buf[cnt + 1] = '\0';
entry->buf[cnt] = '\0';
for (i = 0; i < nr_pages; i++) {
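/*
 * Userspace sketch (illustrative; assumes debugfs mounted at the usual
 * place): tracing_mark_write() above backs the trace_marker file, so
 *
 *	int fd = open("/sys/kernel/debug/tracing/trace_marker", O_WRONLY);
 *	write(fd, "hello from userspace\n", 21);
 *
 * injects a print entry into the ring buffer. The payload is copied
 * directly from the caller's mapped pages, which is why a single write
 * may span at most two pages.
 */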
static int tracing_clock_show(struct seq_file *m, void *v)
for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
"%s%s%s%s", i ? " " : "",
i == trace_clock_id ? "[" : "", trace_clocks[i].name,
i == trace_clock_id ? "]" : "");
static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *fpos)
const char *clockstr;
if (cnt >= sizeof(buf))
clockstr = strstrip(buf);
for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
if (strcmp(trace_clocks[i].name, clockstr) == 0)
static int tracing_clock_open(struct inode *inode, struct file *file)
if (tracing_disabled)
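/*
 * Example (illustrative): tracing_clock_show() prints the selection with
 * the active clock bracketed, e.g. "[local] global counter", and
 * tracing_clock_write() switches it:
 *
 *	echo global > /sys/kernel/debug/tracing/trace_clock
 */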
.read = tracing_max_lat_read,
.write = tracing_max_lat_write,
.read = tracing_ctrl_read,
.write = tracing_ctrl_write,
.read = tracing_set_trace_read,
.write = tracing_set_trace_write,
.open = tracing_open_pipe,
.poll = tracing_poll_pipe,
.read = tracing_read_pipe,
.splice_read = tracing_splice_read_pipe,
.release = tracing_release_pipe,
.open = tracing_entries_open,
.read = tracing_entries_read,
.write = tracing_entries_write,
.release = tracing_entries_release,
.read = tracing_total_entries_read,
.write = tracing_free_buffer_write,
.release = tracing_free_buffer_release,
.write = tracing_mark_write,
.open = tracing_clock_open,
.write = tracing_clock_write,
static int tracing_buffers_open(struct inode *inode, struct file *filp)
if (tracing_disabled)
info->tr = &global_trace;
info->read = (unsigned int)-1;
tracing_buffers_read(struct file *filp, char __user *ubuf,
size_t count, loff_t *ppos)
trace_access_lock(info->cpu);
trace_access_unlock(info->cpu);
static int tracing_buffers_release(struct inode *inode, struct file *file)
.release = buffer_pipe_buf_release,
.get = buffer_pipe_buf_get,
static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
.partial = partial_def,
.ops = &buffer_pipe_buf_ops,
.spd_release = buffer_spd_release,
WARN_ONCE(1, "Ftrace: previous read must page-align\n");
WARN_ONCE(1, "Ftrace: splice_read should page-align\n");
trace_access_lock(info->cpu);
trace_access_unlock(info->cpu);
.open = tracing_buffers_open,
.read = tracing_buffers_read,
.release = tracing_buffers_release,
.splice_read = tracing_buffers_splice_read,
tracing_stats_read(struct file *filp, char __user *ubuf,
size_t count, loff_t *ppos)
unsigned long long t;
unsigned long usec_rem;
.read = tracing_stats_read,
#ifdef CONFIG_DYNAMIC_FTRACE
int __weak ftrace_arch_read_dyn_info(char *buf, int size)
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
static char ftrace_dyn_info_buffer[1024];
char *buf = ftrace_dyn_info_buffer;
int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
r += ftrace_arch_read_dyn_info(buf + r, (size - 1) - r);
.read = tracing_read_dyn_info,
static struct dentry *d_tracer;
if (!d_tracer && !once) {
pr_warning("Could not create debugfs directory 'tracing'\n");
static struct dentry *d_percpu;
if (!d_percpu && !once) {
pr_warning("Could not create debugfs directory 'per_cpu'\n");
static void tracing_init_debugfs_percpu(long cpu)
snprintf(cpu_dir, 30, "cpu%ld", cpu);
pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
(void *) cpu, &tracing_pipe_fops);
(void *) cpu, &tracing_fops);
(void *) cpu, &tracing_buffers_fops);
(void *) cpu, &tracing_stats_fops);
(void *) cpu, &tracing_entries_fops);
#ifdef CONFIG_FTRACE_SELFTEST
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
if (topt->flags->val & topt->opt->bit)
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
if (val != 0 && val != 1)
if (!!(topt->flags->val & topt->opt->bit) != val) {
ret = __set_tracer_option(current_trace, topt->flags,
.read = trace_options_read,
.write = trace_options_write,
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
if (trace_flags & (1 << index))
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
if (val != 0 && val != 1)
set_tracer_flags(1 << index, val);
.read = trace_options_core_read,
.write = trace_options_core_write,
pr_warning("Could not create debugfs '%s' entry\n", name);
static struct dentry *trace_options_init_dentry(void)
static struct dentry *t_options;
pr_warning("Could not create debugfs directory 'options'\n");
struct tracer_flags *flags,
struct dentry *t_options;
t_options = trace_options_init_dentry();
&trace_options_fops);
struct tracer_flags *flags;
flags = tracer->flags;
if (!flags || !flags->opts)
for (cnt = 0; opts[cnt].name; cnt++)
topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
for (cnt = 0; opts[cnt].name; cnt++)
create_trace_option_file(&topts[cnt], flags,
for (cnt = 0; topts[cnt].opt; cnt++) {
if (topts[cnt].entry)
create_trace_option_core_file(const char *option, long index)
struct dentry *t_options;
t_options = trace_options_init_dentry();
&trace_options_core_fops);
static __init void create_trace_options_dir(void)
struct dentry *t_options;
t_options = trace_options_init_dentry();
for (i = 0; trace_options[i]; i++)
create_trace_option_core_file(trace_options[i], i);
rb_simple_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
rb_simple_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
.read = rb_simple_read,
.write = rb_simple_write,
static __init int tracer_init_debugfs(void)
trace_access_lock_init();
&global_trace, &tracing_ctrl_fops);
NULL, &tracing_iter_fops);
NULL, &tracing_cpumask_fops);
&global_trace, &show_traces_fops);
&global_trace, &set_tracer_fops);
#ifdef CONFIG_TRACER_MAX_TRACE
&tracing_max_latency, &tracing_max_lat_fops);
&tracing_thresh, &tracing_max_lat_fops);
NULL, &tracing_readme_fops);
&global_trace, &tracing_total_entries_fops);
&global_trace, &tracing_free_buffer_fops);
NULL, &tracing_mark_fops);
NULL, &tracing_saved_cmdlines_fops);
&global_trace, &rb_simple_fops);
#ifdef CONFIG_DYNAMIC_FTRACE
&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
create_trace_options_dir();
tracing_init_debugfs_percpu(cpu);
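/*
 * Resulting layout (abridged sketch, assuming debugfs on /sys/kernel/debug):
 *
 *	tracing/available_tracers, current_tracer, trace, trace_pipe,
 *	tracing/trace_options, tracing_on, trace_marker, saved_cmdlines,
 *	tracing/buffer_total_size_kb, free_buffer, README,
 *	tracing/options/<flag>  (one file per trace_options[] entry)
 *	tracing/per_cpu/cpuN/{trace, trace_pipe, trace_pipe_raw, stats, ...}
 */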
unsigned long event, void *unused)
.notifier_call = trace_panic_handler,
.notifier_call = trace_die_handler,
#define TRACE_MAX_PRINT 1000
#define KERN_TRACE KERN_EMERG
iter->tr = &global_trace;
iter->trace = current_trace;
unsigned int old_userobj;
static int dump_ran;
unsigned long flags;
printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
printk("# MAY BE MISSING FUNCTION EVENTS\n");
if (disable_tracing)
trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
iter.tr = &global_trace;
iter.trace = current_trace;
switch (oops_dump_mode) {
trace_consume(&iter);
if (!disable_tracing) {
trace_flags |= old_userobj;
__ftrace_dump(true, oops_dump_mode);
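/*
 * Sketch (illustrative): with ftrace_dump_on_oops set, the panic/die
 * notifiers above funnel into this path, equivalent to calling
 *
 *	ftrace_dump(DUMP_ALL);
 *
 * DUMP_ORIG (selected by "ftrace_dump_on_oops=orig_cpu") limits the dump
 * to the CPU that triggered the oops; tracing stays disabled afterwards
 * since the buffers were consumed while dumping.
 */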
__init static int tracer_alloc_buffers(void)
if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
goto out_free_buffer_mask;
if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
ring_buf_size = trace_buf_size;
if (!global_trace.buffer) {
goto out_free_cpumask;
if (global_trace.buffer_disabled)
#ifdef CONFIG_TRACER_MAX_TRACE
if (!max_tr.buffer) {
printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
goto out_free_cpumask;
global_trace.data[i] = &per_cpu(global_trace_cpu, i);
max_tr.data[i] = &per_cpu(max_tr_data, i);
set_buffer_entries(&global_trace,
#ifdef CONFIG_TRACER_MAX_TRACE
set_buffer_entries(&max_tr, 1);
trace_init_cmdlines();
tracing_disabled = 0;
&trace_panic_notifier);
free_cpumask_var(tracing_cpumask);
out_free_buffer_mask:
__init static int clear_boot_tracer(void)
if (!default_bootup_tracer)
default_bootup_tracer);
default_bootup_tracer = NULL;