12 #include <linux/slab.h>
/*
 * Non-zero when the funcgraph-irqs tracer option is cleared: functions
 * executed in hard-interrupt context are then omitted from the graph.
 * Written by func_graph_set_flag() (ftrace_graph_skip_irqs = !set);
 * presumably read by ftrace_graph_ignore_irqs() — that body is not
 * visible in this chunk, confirm against the full file.
 */
19 static int ftrace_graph_skip_irqs;
33 struct ftrace_graph_ent_entry
ent;
34 struct ftrace_graph_ret_entry
ret;
/* Number of output columns each nesting level of the call graph is indented. */
39 #define TRACE_GRAPH_INDENT 2
/*
 * Option bits controlling which columns the function-graph output
 * includes; tested against the "flags" argument in the print_graph_*()
 * helpers and the header-printing code below.
 */
42 #define TRACE_GRAPH_PRINT_OVERRUN 0x1
/* Prefix each line with the CPU number the event occurred on. */
43 #define TRACE_GRAPH_PRINT_CPU 0x2
/* Annotate long-running functions with an overhead marker ('+', '!'). */
44 #define TRACE_GRAPH_PRINT_OVERHEAD 0x4
/* Show the task command name and PID ("comm-pid") column. */
45 #define TRACE_GRAPH_PRINT_PROC 0x8
/* Show the per-function duration column. */
46 #define TRACE_GRAPH_PRINT_DURATION 0x10
/* Show an absolute timestamp column (see print_graph_abs_time()). */
47 #define TRACE_GRAPH_PRINT_ABS_TIME 0x20
/* When set, functions running in IRQ context are traced as well. */
48 #define TRACE_GRAPH_PRINT_IRQS 0x40
95 unsigned long frame_pointer)
97 unsigned long long calltime;
110 if (
current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
117 index = ++
current->curr_ret_stack;
132 unsigned long frame_pointer)
136 index =
current->curr_ret_stack;
146 #if defined(CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST) && !defined(CC_USING_FENTRY)
163 WARN(1,
"Bad frame pointer: expected %lx, received %lx\n"
164 " from func %ps return to %lx\n",
167 (
void *)
current->ret_stack[index].func,
168 current->ret_stack[index].ret);
190 ftrace_pop_return_trace(&trace, &ret, frame_pointer);
192 ftrace_graph_return(&trace);
214 struct ftrace_graph_ent_entry *
entry;
220 sizeof(*entry), flags, pc);
224 entry->graph_ent = *
trace;
231 static inline int ftrace_graph_ignore_irqs(
void)
249 if (!ftrace_trace_task(
current))
253 if (!(trace->
depth || ftrace_graph_addr(trace->
func)) ||
254 ftrace_graph_ignore_irqs())
261 if (
likely(disabled == 1)) {
284 unsigned long ip,
unsigned long flags,
int pc)
304 unsigned long ip,
unsigned long parent_ip,
307 __trace_graph_function(tr, ip, flags, pc);
318 struct ftrace_graph_ret_entry *
entry;
324 sizeof(*entry), flags, pc);
346 if (
likely(disabled == 1)) {
390 static void graph_trace_reset(
struct trace_array *tr)
393 unregister_ftrace_graph();
/*
 * Character width of the largest possible CPU id; computed once at init
 * time in init_graph_trace() as snprintf(NULL, 0, "%d", nr_cpu_ids - 1)
 * so the CPU column can be padded to a fixed width.
 */
396 static int max_bytes_for_cpu;
/*
 * Field width used when formatting the process ("comm-pid") column.
 * NOTE(review): the consumer is not visible in this chunk — presumably
 * print_graph_proc(); confirm against the full file.
 */
415 #define TRACE_GRAPH_PROCINFO_LENGTH 14
439 for (i = 0; i < spaces / 2; i++) {
450 for (i = 0; i < spaces - (spaces / 2); i++) {
481 if (*last_pid == pid)
484 prev_pid = *last_pid;
498 " ------------------------------------------\n");
502 ret = print_graph_cpu(s, cpu);
506 ret = print_graph_proc(s, prev_pid);
514 ret = print_graph_proc(s, pid);
519 "\n ------------------------------------------\n\n");
526 static struct ftrace_graph_ret_entry *
528 struct ftrace_graph_ent_entry *
curr)
533 struct ftrace_graph_ret_entry *
next;
539 if (data && data->
failed) {
544 ring_iter = trace_buffer_iter(iter, iter->
cpu);
579 data->
ret.ent.type = next->ent.type;
586 if (curr->ent.pid != next->ent.pid ||
587 curr->graph_ent.func != next->ret.func)
599 unsigned long usecs_rem;
605 (
unsigned long)t, usecs_rem);
615 if (addr < (
unsigned long)__irqentry_text_start ||
616 addr >= (
unsigned long)__irqentry_text_end)
622 ret = print_graph_abs_time(iter->
ts, s);
629 ret = print_graph_cpu(s, cpu);
636 ret = print_graph_proc(s, pid);
672 unsigned long nsecs_rem =
do_div(duration, 1000);
679 sprintf(msecs_str,
"%lu", (
unsigned long) duration);
690 size_t slen =
min_t(
size_t,
sizeof(nsecs_str), 8
UL - len);
692 snprintf(nsecs_str, slen,
"%03lu", nsecs_rem);
704 for (i = len; i < 7; i++) {
738 if (duration > 100000ULL)
741 else if (duration > 10000ULL)
771 struct ftrace_graph_ent_entry *entry,
772 struct ftrace_graph_ret_entry *ret_entry,
782 graph_ret = &ret_entry->ret;
783 call = &entry->graph_ent;
800 if (call->
depth < FTRACE_RETFUNC_DEPTH)
805 ret = print_graph_duration(duration, s, flags);
825 struct ftrace_graph_ent_entry *entry,
841 if (call->
depth < FTRACE_RETFUNC_DEPTH)
870 int type,
unsigned long addr,
u32 flags)
883 ret = print_graph_irq(iter, addr, type, cpu, ent->
pid, flags);
892 if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
893 ret = print_graph_abs_time(iter->
ts, s);
899 if (flags & TRACE_GRAPH_PRINT_CPU) {
900 ret = print_graph_cpu(s, cpu);
906 if (flags & TRACE_GRAPH_PRINT_PROC) {
907 ret = print_graph_proc(s, ent->
pid);
918 ret = print_graph_lat_fmt(s, ent);
939 unsigned long addr,
int depth)
962 if ((addr < (
unsigned long)__irqentry_text_start) ||
963 (addr >= (
unsigned long)__irqentry_text_end))
996 if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
1005 if (*depth_irq == -1)
1017 if (*depth_irq >= depth) {
1029 print_graph_entry(
struct ftrace_graph_ent_entry *
field,
struct trace_seq *s,
1034 struct ftrace_graph_ret_entry *leaf_ret;
1036 int cpu = iter->
cpu;
1038 if (check_irq_entry(iter, flags, call->
func, call->
depth))
1044 leaf_ret = get_return_for_leaf(iter, field);
1046 ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
1048 ret = print_graph_entry_nested(iter, field, s, cpu, flags);
1073 int cpu = iter->
cpu;
1078 if (check_irq_return(iter, flags, trace->
depth))
1083 int cpu = iter->
cpu;
1094 if (trace->
depth < FTRACE_RETFUNC_DEPTH) {
1101 if (print_graph_prologue(iter, s, 0, 0, flags))
1105 ret = print_graph_duration(duration, s, flags);
1162 if (print_graph_prologue(iter, s, 0, 0, flags))
1183 switch (iter->
ent->type) {
1199 ret =
event->funcs->trace(iter, sym_flags, event);
1221 struct ftrace_graph_ent_entry *
field;
1225 int cpu = iter->
cpu;
1237 if (data && data->
failed) {
1240 ret = print_graph_entry(field, s, iter, flags);
1249 switch (entry->
type) {
1257 struct ftrace_graph_ent_entry saved;
1260 return print_graph_entry(&saved, s, iter, flags);
1263 struct ftrace_graph_ret_entry *
field;
1265 return print_graph_return(&field->ret, s, entry, iter, flags);
1273 return print_graph_comment(s, entry, iter, flags);
1286 print_graph_function_event(
struct trace_iterator *iter,
int flags,
1289 return print_graph_function(iter);
1292 static void print_lat_header(
struct seq_file *s,
u32 flags)
1294 static const char spaces[] =
" "
1299 if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1301 if (flags & TRACE_GRAPH_PRINT_CPU)
1303 if (flags & TRACE_GRAPH_PRINT_PROC)
1306 seq_printf(s,
"#%.*s _-----=> irqs-off \n", size, spaces);
1307 seq_printf(s,
"#%.*s / _----=> need-resched \n", size, spaces);
1308 seq_printf(s,
"#%.*s| / _---=> hardirq/softirq \n", size, spaces);
1309 seq_printf(s,
"#%.*s|| / _--=> preempt-depth \n", size, spaces);
1310 seq_printf(s,
"#%.*s||| / \n", size, spaces);
1313 static void __print_graph_headers_flags(
struct seq_file *s,
u32 flags)
1318 print_lat_header(s, flags);
1322 if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1324 if (flags & TRACE_GRAPH_PRINT_CPU)
1326 if (flags & TRACE_GRAPH_PRINT_PROC)
1330 if (flags & TRACE_GRAPH_PRINT_DURATION)
1336 if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1338 if (flags & TRACE_GRAPH_PRINT_CPU)
1340 if (flags & TRACE_GRAPH_PRINT_PROC)
1344 if (flags & TRACE_GRAPH_PRINT_DURATION)
1369 __print_graph_headers_flags(s, flags);
1407 pr_warning(
"function graph tracer: not enough memory\n");
1420 static int func_graph_set_flag(
u32 old_flags,
u32 bit,
int set)
1422 if (bit == TRACE_GRAPH_PRINT_IRQS)
1423 ftrace_graph_skip_irqs = !
set;
1429 .trace = print_graph_function_event,
1432 static struct trace_event graph_trace_entry_event = {
1434 .funcs = &graph_functions,
1437 static struct trace_event graph_trace_ret_event = {
1439 .funcs = &graph_functions
1443 .name =
"function_graph",
1449 .init = graph_trace_init,
1450 .reset = graph_trace_reset,
1451 .print_line = print_graph_function,
1453 .flags = &tracer_flags,
1454 .set_flag = func_graph_set_flag,
1455 #ifdef CONFIG_FTRACE_SELFTEST
1456 .selftest = trace_selftest_startup_function_graph,
1460 static __init int init_graph_trace(
void)
1462 max_bytes_for_cpu =
snprintf(
NULL, 0,
"%d", nr_cpu_ids - 1);
1465 pr_warning(
"Warning: could not register graph trace events\n");
1470 pr_warning(
"Warning: could not register graph trace events\n");