#include <linux/slab.h>
	switch (entry->type) {
	unsigned int loops = 0;

	if (loops++ > trace_buf_size) {

	if (!trace_valid_entry(entry)) {
		ret = trace_test_buffer_cpu(tr, cpu);
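	/*
	 * Sketch of the elided context: trace_test_buffer() presumably
	 * walks every possible CPU, consuming that CPU's ring buffer via
	 * trace_test_buffer_cpu() and bailing out at the first corrupted
	 * entry it reports.
	 */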
static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
	       trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE
static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe1_cnt++;
}
static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe2_cnt++;
}
static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe3_cnt++;
}
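/*
 * These callbacks use the four-argument ftrace_func_t prototype of this
 * kernel generation (ip, parent ip, ops, pt_regs); the parent-ip and
 * regs parameters are assumed from that prototype, not shown in the
 * fragment. Each callback only bumps its own counter, which the
 * selftest later compares against how often each ops' filter should
 * have fired.
 */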
static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_global_cnt++;
}
static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
					 unsigned long pip,
					 struct ftrace_ops *op,
					 struct pt_regs *pt_regs)
{
	trace_selftest_test_dyn_cnt++;
}
static struct ftrace_ops test_probe1 = {
	.func = trace_selftest_test_probe1_func,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe2 = {
	.func = trace_selftest_test_probe2_func,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe3 = {
	.func = trace_selftest_test_probe3_func,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_global = {
	.func = trace_selftest_test_global_func,
	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};
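/*
 * FTRACE_OPS_FL_RECURSION_SAFE declares that the callback handles its
 * own recursion protection, letting the ftrace core skip its guard.
 * FTRACE_OPS_FL_GLOBAL (present in this era) ties the ops to the global
 * filter state shared with the function tracer. A minimal usage sketch
 * with the standard API:
 *
 *	ret = register_ftrace_function(&test_probe1);
 *	... run the filtered functions ...
 *	unregister_ftrace_function(&test_probe1);
 */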
static void print_counts(void)
{
	printk("(%d %d %d %d %d) ",
	       trace_selftest_test_probe1_cnt,
	       trace_selftest_test_probe2_cnt,
	       trace_selftest_test_probe3_cnt,
	       trace_selftest_test_global_cnt,
	       trace_selftest_test_dyn_cnt);
}
static void reset_counts(void)
{
	trace_selftest_test_probe1_cnt = 0;
	trace_selftest_test_probe2_cnt = 0;
	trace_selftest_test_probe3_cnt = 0;
	trace_selftest_test_global_cnt = 0;
	trace_selftest_test_dyn_cnt = 0;
}
static int trace_selftest_ops(int cnt)
{
	int save_ftrace_enabled = ftrace_enabled;
	struct ftrace_ops *dyn_ops;

	pr_info("Testing dynamic ftrace ops #%d: ", cnt);
	len1 = strlen(func1_name);
	len2 = strlen(func2_name);
	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 0)
		goto out;
	if (trace_selftest_test_probe3_cnt != 1)
		goto out;
	if (trace_selftest_test_global_cnt == 0)
		goto out;

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out;
	if (trace_selftest_test_probe3_cnt != 2)
		goto out;
	dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
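	/*
	 * A dynamically allocated ftrace_ops exercises the dynamic-ops
	 * code path in the core; presumably the elided code checks the
	 * allocation and registers it with register_ftrace_function()
	 * before re-running the counter checks below.
	 */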
	dyn_ops->func = trace_selftest_test_dyn_func;

	trace_selftest_test_global_cnt = 0;
	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 3)
		goto out_free;
	if (trace_selftest_test_global_cnt == 0)
		goto out_free;
	if (trace_selftest_test_dyn_cnt == 0)
		goto out_free;

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 4)
		goto out_free;
	if (trace_selftest_test_probe1_cnt ||
	    trace_selftest_test_probe2_cnt ||
	    trace_selftest_test_probe3_cnt ||
	    trace_selftest_test_global_cnt ||
	    trace_selftest_test_dyn_cnt)
		goto out_free;
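	/*
	 * Final sanity check: by this point every ops has presumably been
	 * unregistered and the counters reset, so a single stray callback
	 * invocation fails the test.
	 */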
	ftrace_enabled = save_ftrace_enabled;
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	pr_info("Testing dynamic ftrace: ");

	ftrace_set_global_filter(func_name, strlen(func_name), 1);

	warn_failed_init_tracer(trace, ret);

	ret = trace_test_buffer(tr, &count);
	ret = trace_test_buffer(tr, &count);

	if (!ret && count != 1) {

	ret = trace_selftest_ops(1);

	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	ftrace_set_global_filter(NULL, 0, 1);

	ret = trace_selftest_ops(2);
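	/*
	 * trace_selftest_ops() runs twice: pass 1 above while the
	 * dynamic-tracing test still has its state set up, pass 2 here
	 * after the global filter has been cleared. The cnt argument
	 * apparently also controls whether the global ops itself gets
	 * registered and exercised.
	 */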
static int trace_selftest_recursion_cnt;
static void trace_selftest_test_recursion_func(unsigned long ip,
					       unsigned long pip,
					       struct ftrace_ops *op,
					       struct pt_regs *pt_regs)
{
	/* Registered without RECURSION_SAFE: the ftrace core must protect us. */
	trace_selftest_recursion_cnt++;
	DYN_FTRACE_TEST_NAME();	/* re-enter the traced function */
}
static void trace_selftest_test_recursion_safe_func(unsigned long ip,
						    unsigned long pip,
						    struct ftrace_ops *op,
						    struct pt_regs *pt_regs)
{
	/* We claim to handle our own recursion: count only the first entry. */
	if (trace_selftest_recursion_cnt++)
		return;
	DYN_FTRACE_TEST_NAME();
}
static struct ftrace_ops test_rec_probe = {
	.func = trace_selftest_test_recursion_func,
};

static struct ftrace_ops test_recsafe_probe = {
	.func = trace_selftest_test_recursion_safe_func,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
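/*
 * Note the contrast: test_rec_probe omits FTRACE_OPS_FL_RECURSION_SAFE,
 * so the ftrace core is expected to stop the recursion, while
 * test_recsafe_probe sets the flag and must cope with recursing into
 * itself (its callback bails after the first re-entry).
 */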
static int
trace_selftest_function_recursion(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;

	pr_info("Testing ftrace recursion: ");
		pr_cont("*Could not set filter* ");

		pr_cont("*could not register callback* ");
	if (trace_selftest_recursion_cnt != 1) {
		pr_cont("*callback not called once (%d)* ",
			trace_selftest_recursion_cnt);

	trace_selftest_recursion_cnt = 1;
	pr_info("Testing ftrace recursion safe: ");

		pr_cont("*Could not set filter* ");

		pr_cont("*could not register callback* ");
	if (trace_selftest_recursion_cnt != cnt) {
		pr_cont("*callback not called expected %d times (%d)* ",
			cnt, trace_selftest_recursion_cnt);

	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
# define trace_selftest_function_recursion() ({ 0; })
static enum {
	TRACE_SELFTEST_REGS_START,
	TRACE_SELFTEST_REGS_FOUND,
	TRACE_SELFTEST_REGS_NOT_FOUND,
} trace_selftest_regs_stat;
static void trace_selftest_test_regs_func(unsigned long ip,
					  unsigned long pip,
					  struct ftrace_ops *op,
					  struct pt_regs *pt_regs)
{
	if (pt_regs)
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
	else
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
}
static struct ftrace_ops test_regs_probe = {
	.func = trace_selftest_test_regs_func,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_SAVE_REGS,
};
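/*
 * FTRACE_OPS_FL_SAVE_REGS asks ftrace to hand the callback a full
 * pt_regs. On arches without the needed mcount support, registering an
 * ops with this flag is expected to fail, which is exactly what the
 * test below probes for; SAVE_REGS_IF_SUPPORTED (set later) instead
 * lets the callback run with NULL regs on such arches.
 */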
static int
trace_selftest_function_regs(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;

#ifdef ARCH_SUPPORTS_FTRACE_SAVE_REGS
	pr_info("Testing ftrace regs%s: ",
		!supported ? "(no arch support)" : "");
	if (ret && ret != -ENODEV) {
		pr_cont("*Could not set filter* ");

		pr_cont("*registered save-regs without arch support* ");

	test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;

		pr_cont("*could not register callback* ");
	switch (trace_selftest_regs_stat) {
	case TRACE_SELFTEST_REGS_START:
		pr_cont("*callback never called* ");
		goto out;

	case TRACE_SELFTEST_REGS_FOUND:
		if (supported)
			break;
		pr_cont("*callback received regs without arch support* ");
		goto out;

	case TRACE_SELFTEST_REGS_NOT_FOUND:
		if (!supported)
			break;
		pr_cont("*callback received NULL regs* ");
		goto out;
	}
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	warn_failed_init_tracer(trace, ret);

	ret = trace_test_buffer(tr, &count);

	if (!ret && !count) {

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,

	ret = trace_selftest_function_recursion();

	ret = trace_selftest_function_regs();

	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#define GRAPH_MAX_FUNC_TEST	100000000

static unsigned int graph_hang_thresh;
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
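		/*
		 * Watchdog for the graph-tracer selftest: the entry
		 * callback bumps graph_hang_thresh on every hit, so
		 * tripping this absurdly large bound means the tracer
		 * looped without the test completing; treat it as a
		 * hang rather than letting it lock up the box.
		 */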
int
trace_selftest_startup_function_graph(struct tracer *trace,
				      struct trace_array *tr)
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry_watchdog);
	warn_failed_init_tracer(trace, ret);

	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {

	ret = trace_test_buffer(tr, &count);

	if (!ret && !count) {
#ifdef CONFIG_IRQSOFF_TRACER

	unsigned long save_max = tracing_max_latency;

	warn_failed_init_tracer(trace, ret);

	tracing_max_latency = 0;

	ret = trace_test_buffer(tr, NULL);

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {

	tracing_max_latency = save_max;
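	/*
	 * This save/zero/provoke/check/restore dance around
	 * tracing_max_latency is the common pattern for all the
	 * latency-tracer selftests below: zero the max, trigger a
	 * worst-case section (irqs off, preemption off, or both), then
	 * verify the max-latency snapshot buffer recorded something.
	 */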
#ifdef CONFIG_PREEMPT_TRACER

	unsigned long save_max = tracing_max_latency;

	warn_failed_init_tracer(trace, ret);

	tracing_max_latency = 0;

	ret = trace_test_buffer(tr, NULL);

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {

	tracing_max_latency = save_max;
#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)

int
trace_selftest_startup_preemptirqsoff(struct tracer *trace,
				      struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	warn_failed_init_tracer(trace, ret);

	tracing_max_latency = 0;

	ret = trace_test_buffer(tr, NULL);

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {

	tracing_max_latency = 0;

	ret = trace_test_buffer(tr, NULL);

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {

	tracing_max_latency = save_max;
#ifdef CONFIG_NOP_TRACER

#ifdef CONFIG_SCHED_TRACER

static int trace_wakeup_test_thread(void *data)
	unsigned long save_max = tracing_max_latency;
	unsigned long count;

	init_completion(&isrt);
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	warn_failed_init_tracer(trace, ret);

	tracing_max_latency = 0;

	init_completion(&isrt);

	ret = trace_test_buffer(tr, NULL);

	ret = trace_test_buffer(&max_tr, &count);

	tracing_max_latency = save_max;

	if (!ret && !count) {
#ifdef CONFIG_CONTEXT_SWITCH_TRACER

int
trace_selftest_startup_sched_switch(struct tracer *trace,
				    struct trace_array *tr)
{
	unsigned long count;

	warn_failed_init_tracer(trace, ret);

	ret = trace_test_buffer(tr, &count);

	if (!ret && !count) {
#ifdef CONFIG_BRANCH_TRACER

	unsigned long count;

	warn_failed_init_tracer(trace, ret);

	ret = trace_test_buffer(tr, &count);

	if (!ret && !count) {