#include <linux/hw_breakpoint.h>

        bool *visited = symbol__priv(sym);
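/*
 * test__vmlinux_matches_kallsyms: load the kernel symbol table twice, once
 * from the vmlinux image and once from /proc/kallsyms, then walk both maps
 * and report symbols and maps that exist only on one side or whose
 * start/end/pgoff disagree.  A non-zero return marks the test as failed.
 */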
static int test__vmlinux_matches_kallsyms(void)

        struct map *kallsyms_map, *vmlinux_map;

                pr_debug("machine__create_kernel_maps ");

        kallsyms_map = machine__kernel_map(&kallsyms, type);

                pr_debug("dso__find_symbol_by_name ");

                pr_debug("machine__create_kernel_maps ");

        vmlinux_map = machine__kernel_map(&vmlinux, type);
        map__kmap(vmlinux_map)->ref_reloc_sym = &ref_reloc_sym;

                                  vmlinux_matches_kallsyms_filter) <= 0) {
                pr_debug("machine__load_vmlinux_path ");

        struct symbol *pair, *first_pair;
        bool backwards = true;

                if (llabs(skew) < page_size)

        pr_info("Maps only in vmlinux:\n");

                        pos->dso->short_name :

        pr_info("Maps in vmlinux with a different name in kallsyms:\n");

                if (pair == NULL || pair->priv)

                if (pos->pgoff != pair->pgoff || pos->end != pair->end)

                                pair->start, pair->end, pair->pgoff);

        pr_info("Maps only in kallsyms:\n");
#include <sys/types.h>
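/*
 * trace_event__id: read the numeric id of a tracepoint such as
 * "sys_enter_open" from its debugfs events directory, so the callers below
 * can plug it into perf_event_attr.config for a PERF_TYPE_TRACEPOINT event.
 * A negative return means the id file could not be read.
 */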
static int trace_event__id(const char *evname)

        if (asprintf(&filename,

        if (read(fd, id, sizeof(id)) > 0)
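/*
 * test__open_syscall_event: count sys_enter_open tracepoint hits for the
 * current thread only.  The test opens the counter, performs a fixed number
 * of open() calls (nr_open_calls = 111), and then checks that
 * perf_evsel__read_on_cpu() reports exactly that many events.
 */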
static int test__open_syscall_event(void)

        unsigned int nr_open_calls = 111, i;
        int id = trace_event__id("sys_enter_open");

                pr_debug("is debugfs mounted on /sys/kernel/debug?\n");

        if (threads == NULL) {

                goto out_thread_map_delete;

                pr_debug("failed to open counter: %s, "
                         "tweak /proc/sys/kernel/perf_event_paranoid?\n",
                goto out_evsel_delete;

        for (i = 0; i < nr_open_calls; ++i) {

        if (perf_evsel__read_on_cpu(evsel, 0, 0) < 0) {
                pr_debug("perf_evsel__read_on_cpu\n");

        if (evsel->counts->cpu[0].val != nr_open_calls) {
                pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls, got %" PRIu64 "\n",
                         nr_open_calls, evsel->counts->cpu[0].val);

out_thread_map_delete:
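/*
 * test__open_syscall_event_on_all_cpus: same idea as above, but the counter
 * is opened on every online CPU.  For each CPU the test pins itself there
 * with sched_setaffinity(), performs nr_open_calls + cpu open() calls, and
 * later verifies that the per-CPU count matches what was generated on that
 * CPU.  CPUs whose number does not fit in cpu_set_t (>= CPU_SETSIZE) are
 * skipped.
 */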
static int test__open_syscall_event_on_all_cpus(void)

        int err = -1, fd, cpu;
        unsigned int nr_open_calls = 111, i;
        int id = trace_event__id("sys_enter_open");

                pr_debug("is debugfs mounted on /sys/kernel/debug?\n");

        if (threads == NULL) {

                goto out_thread_map_delete;

                goto out_thread_map_delete;

                pr_debug("failed to open counter: %s, "
                         "tweak /proc/sys/kernel/perf_event_paranoid?\n",
                goto out_evsel_delete;

        for (cpu = 0; cpu < cpus->nr; ++cpu) {
                unsigned int ncalls = nr_open_calls + cpu;

                if (cpus->map[cpu] >= CPU_SETSIZE) {

                CPU_SET(cpus->map[cpu], &cpu_set);

                        pr_debug("sched_setaffinity() failed on CPU %d: %s ",

                for (i = 0; i < ncalls; ++i) {

                CPU_CLR(cpus->map[cpu], &cpu_set);

                pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);

        for (cpu = 0; cpu < cpus->nr; ++cpu) {
                unsigned int expected;

                if (cpus->map[cpu] >= CPU_SETSIZE)

                if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
                        pr_debug("perf_evsel__read_on_cpu\n");

                expected = nr_open_calls + cpu;
                if (evsel->counts->cpu[cpu].val != expected) {
                        pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
                                 expected, cpus->map[cpu], evsel->counts->cpu[cpu].val);

out_thread_map_delete:
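/*
 * test__basic_mmap: open one tracepoint counter per syscall in
 * syscall_names[], mmap the resulting ring buffer, generate a random number
 * of calls to each syscall, and then read the events back through the mmap
 * interface, checking that every sample maps to the right evsel and that the
 * per-evsel event counts match expected_nr_events[].
 */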
static int test__basic_mmap(void)

        const char *syscall_names[] = { "getsid", "getppid", "getpgrp",

        pid_t (*syscalls[])(void) = { (void *)getsid, getppid, getpgrp,

#define nsyscalls ARRAY_SIZE(syscall_names)

                snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]);
                ids[i] = trace_event__id(name);

                        pr_debug("Is debugfs mounted on /sys/kernel/debug?\n");

                expected_nr_events[i] = random() % 257;

        if (threads == NULL) {

                goto out_free_threads;

        CPU_SET(cpus->map[0], &cpu_set);

                pr_debug("sched_setaffinity() failed on CPU %d: %s ",

        if (evlist == NULL) {

        for (i = 0; i < nsyscalls; ++i) {

                if (evsels[i] == NULL) {

                        goto out_free_evlist;

                        pr_debug("failed to open counter: %s, "
                                 "tweak /proc/sys/kernel/perf_event_paranoid?\n",

                pr_debug("failed to mmap events: %d (%s)\n", errno,

        for (i = 0; i < nsyscalls; ++i)
                for (j = 0; j < expected_nr_events[i]; ++j) {

                        pr_err("Can't parse sample, err = %d\n", err);

                                 " doesn't map to an evsel\n", sample.id);

                nr_events[evsel->idx]++;

        if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) {
                pr_debug("expected %d %s events, got %d\n",
                         expected_nr_events[evsel->idx],

        for (i = 0; i < nsyscalls; ++i)
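/*
 * sched__get_first_possible_cpu: fetch the affinity mask of @pid with
 * sched_getaffinity(), growing the assumed mask size whenever the kernel
 * returns EINVAL, and return the first CPU set in the mask (or -1 when none
 * is found or the syscall fails).
 */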
static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t *maskp)

        int i, cpu = -1, nrcpus = 1024;

                if (errno == EINVAL && nrcpus < (1024 << 8)) {
                        nrcpus = nrcpus << 2;

                perror("sched_getaffinity");

        for (i = 0; i < nrcpus; i++) {
                if (CPU_ISSET(i, maskp)) {
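/*
 * test__PERF_RECORD: run a short workload ("sleep 1") on a freshly created
 * evlist, consume every event from the mmap ring buffers and sanity-check
 * the PERF_RECORD_* stream: timestamps must not go backwards, cpu/pid/tid
 * must match the forked workload, a COMM event plus MMAP events for the
 * command, libc, the dynamic linker and [vdso] must all show up, and an
 * EXIT event must end the stream.
 */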
static int test__PERF_RECORD(void)

        size_t cpu_mask_size = sizeof(cpu_mask);

        const char *cmd = "sleep";
        const char *argv[] = { cmd, "1", NULL, };

        bool found_cmd_mmap = false,
             found_libc_mmap = false,
             found_vdso_mmap = false,
             found_ld_mmap = false;
        int err = -1, errs = 0, i, wakeups = 0;

        if (evlist == NULL || argv == NULL) {
                pr_debug("Not enough memory to create evlist\n");

                pr_debug("Not enough memory to create evsel\n");
                goto out_delete_evlist;

                pr_debug("Not enough memory to create thread/cpu maps\n");
                goto out_delete_evlist;

                pr_debug("Couldn't run the workload!\n");
                goto out_delete_evlist;

        evsel = perf_evlist__first(evlist);

        err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask);

                goto out_delete_evlist;

                goto out_delete_evlist;

                goto out_delete_evlist;

                goto out_delete_evlist;

                int before = total_events;

                for (i = 0; i < evlist->nr_mmaps; i++) {

                        const u32 type = event->header.type;

                                pr_debug("Couldn't parse sample\n");

                        if (prev_time > sample.time) {

                                 name, prev_time, sample.time);

                                pr_debug("%s with unexpected cpu, expected %d, got %d\n",

                                pr_debug("%s with unexpected pid, expected %d, got %d\n",

                                pr_debug("%s with unexpected tid, expected %d, got %d\n",

                                pr_debug("%s with unexpected pid/tid\n", name);

                            event->comm.pid != event->comm.tid) {
                                pr_debug("%s with different pid/tid!\n", name);

                                pr_debug("%s with unexpected comm!\n", name);

                                        found_cmd_mmap = !strcmp(bname + 1, cmd);
                                if (!found_libc_mmap)
                                        found_libc_mmap = !strncmp(bname + 1, "libc", 4);

                                        found_ld_mmap = !strncmp(bname + 1, "ld", 2);
                        } else if (!found_vdso_mmap)
                                found_vdso_mmap = !strcmp(event->mmap.filename, "[vdso]");

                        pr_debug("Unexpected perf_event->header.type %d!\n",

                if (total_events == before && false)

                pr_debug("No PERF_RECORD_EXIT event!\n");

                pr_debug("Excessive number of PERF_RECORD_COMM events!\n");

                pr_debug("Missing PERF_RECORD_COMM for %s!\n", cmd);

        if (!found_cmd_mmap) {
                pr_debug("PERF_RECORD_MMAP for %s missing!\n", cmd);

        if (!found_libc_mmap) {
                pr_debug("PERF_RECORD_MMAP for %s missing!\n", "libc");

        if (!found_ld_mmap) {
                pr_debug("PERF_RECORD_MMAP for %s missing!\n", "ld");

        if (!found_vdso_mmap) {
                pr_debug("PERF_RECORD_MMAP for %s missing!\n", "[vdso]");

        return (err < 0 || errs > 0) ? -1 : 0;
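/*
 * The rdpmc test is x86 only: it reads a self-monitoring counter directly
 * from user space with the rdpmc instruction, using the counter index and
 * the time_enabled/time_running scaling data exported through the mmapped
 * perf_event_mmap_page.
 */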
#if defined(__x86_64__) || defined(__i386__)

#define barrier() asm volatile("" ::: "memory")

        asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));

        return low | ((u64)high) << 32;

        asm volatile("rdtsc" : "=a" (low), "=d" (high));

        return low | ((u64)high) << 32;
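/*
 * mmap_read_self: lock-free read of the counter value from the
 * perf_event_mmap_page at @addr.  The page's ->lock field acts as a
 * seqcount: the read is retried until the value observed before and after
 * (seq) is unchanged, and when time_enabled and time_running differ the raw
 * count is scaled by the enabled/running ratio.
 */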
static u64 mmap_read_self(void *addr)

                        count += rdpmc(idx - 1);

        } while (pc->lock != seq);

                quot = count / running;
                rem = count % running;

                             void *uc __maybe_unused)

static int __test__rdpmc(void)

        long page_size = sysconf(_SC_PAGE_SIZE);
        volatile int tmp = 0;

                .exclude_kernel = 1,

        sigfillset(&sa.sa_mask);
        sa.sa_sigaction = segfault_handler;

                pr_err("Error: sys_perf_event_open() syscall returned "
                       "with %d (%s)\n", fd, strerror(errno));

        if (addr == (void *)(-1)) {
                pr_err("Error: mmap() syscall returned with (%s)\n",

        for (n = 0; n < 6; n++) {

                stamp = mmap_read_self(addr);

                for (i = 0; i < loops; i++)

                now = mmap_read_self(addr);

                delta = now - stamp;
                pr_debug("%14d: %14Lu\n", n, (long long)delta);

        munmap(addr, page_size);

static int test__rdpmc(void)

        ret = __test__rdpmc();

        wret = waitpid(pid, &status, 0);
        if (wret < 0 || status)
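/*
 * The next group of tests exercises event-name handling: test__perf_pmu
 * checks parsing of the sysfs PMU format descriptions, while the roundtrip
 * tests parse known event name strings into an evlist and verify that each
 * resulting evsel comes back with the expected name and index.
 */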
static int test__perf_pmu(void)

static int perf_evsel__roundtrip_cache_name_test(void)

                                        name, sizeof(name));

        evsel = perf_evlist__first(evlist);

                                        name, sizeof(name));
                if (evsel->idx != idx)

                evsel = perf_evsel__next(evsel);

static int __perf_evsel__name_array_test(const char *names[],
                                         int nr_names)

        for (i = 0; i < nr_names; ++i) {

                        pr_debug("failed to parse event '%s', err %d\n",

                        goto out_delete_evlist;

#define perf_evsel__name_array_test(names) \
        __perf_evsel__name_array_test(names, ARRAY_SIZE(names))

static int perf_evsel__roundtrip_name_test(void)

        int err = 0, ret = 0;

        err = perf_evsel__roundtrip_cache_name_test();
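/*
 * perf_evsel__test_field: look up @name in the tracepoint format of @evsel
 * and check that both its size and its signedness match what the caller
 * expects, logging a pr_debug() message on any mismatch.
 */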
static int perf_evsel__test_field(struct perf_evsel *evsel, const char *name,
                                  int size, bool should_be_signed)

        if (field == NULL) {
                pr_debug("%s: \"%s\" field not found!\n", evsel->name, name);

        if (should_be_signed && !is_signed) {
                pr_debug("%s: \"%s\" signedness(%d) is wrong, should be %d\n",
                         evsel->name, name, is_signed, should_be_signed);

        if (field->size != size) {
                pr_debug("%s: \"%s\" size (%d) should be %d!\n",
                         evsel->name, name, field->size, size);
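/*
 * perf_evsel__tp_sched_test: use perf_evsel__test_field() to validate the
 * field layout of the sched:sched_switch and sched:sched_wakeup tracepoints
 * (prev_comm/next_comm, pids, priorities and friends) against hard-coded
 * size and signedness expectations.
 */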
static int perf_evsel__tp_sched_test(void)

        if (evsel == NULL) {

        if (perf_evsel__test_field(evsel, "prev_comm", 16, true))

        if (perf_evsel__test_field(evsel, "prev_pid", 4, true))

        if (perf_evsel__test_field(evsel, "prev_prio", 4, true))

        if (perf_evsel__test_field(evsel, "prev_state", 8, true))

        if (perf_evsel__test_field(evsel, "next_comm", 16, true))

        if (perf_evsel__test_field(evsel, "next_pid", 4, true))

        if (perf_evsel__test_field(evsel, "next_prio", 4, true))

        if (perf_evsel__test_field(evsel, "comm", 16, true))

        if (perf_evsel__test_field(evsel, "pid", 4, true))

        if (perf_evsel__test_field(evsel, "prio", 4, true))

        if (perf_evsel__test_field(evsel, "success", 4, true))

        if (perf_evsel__test_field(evsel, "target_cpu", 4, true))
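/*
 * test__syscall_open_tp_fields: open the syscalls:sys_enter_open tracepoint
 * in raw sample mode, call open() on a test file with known flags, and then
 * read the flags back out of the recorded tracepoint fields to confirm that
 * sample parsing recovers the original value.
 */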
static int test__syscall_open_tp_fields(void)

                .raw_samples = true,

        const char *filename = "/etc/passwd";

        int err = -1, i, nr_events = 0, nr_polls = 0;

        if (evlist == NULL) {
                pr_debug("%s: perf_evlist__new\n", __func__);

        if (evsel == NULL) {
                pr_debug("%s: perf_evsel__newtp\n", __func__);
                goto out_delete_evlist;

                pr_debug("%s: perf_evlist__create_maps\n", __func__);
                goto out_delete_evlist;

        evlist->threads->map[0] = getpid();

                goto out_delete_evlist;

                goto out_delete_evlist;

        open(filename, flags);

                int before = nr_events;

                for (i = 0; i < evlist->nr_mmaps; i++) {

                        const u32 type = event->header.type;

                                pr_err("Can't parse sample, err = %d\n", err);

                        if (flags != tp_flags) {
                                pr_debug("%s: Expected flags=%#x, got %#x\n",
                                         __func__, flags, tp_flags);

                if (nr_events == before)

                        if (++nr_polls > 5) {
                                pr_debug("%s: no events!\n", __func__);
static struct test {

                .desc = "vmlinux symtab matches kallsyms",
                .func = test__vmlinux_matches_kallsyms,

                .desc = "detect open syscall event",
                .func = test__open_syscall_event,

                .desc = "detect open syscall event on all cpus",
                .func = test__open_syscall_event_on_all_cpus,

                .desc = "read samples using the mmap interface",
                .func = test__basic_mmap,

                .desc = "parse events tests",

#if defined(__x86_64__) || defined(__i386__)

                .desc = "x86 rdpmc test",
                .func = test__rdpmc,

                .desc = "Validate PERF_RECORD_* events & perf_sample fields",
                .func = test__PERF_RECORD,

                .desc = "Test perf pmu format parsing",
                .func = test__perf_pmu,

                .desc = "Test dso data interface",

                .desc = "roundtrip evsel->name check",
                .func = perf_evsel__roundtrip_name_test,

                .desc = "Check parsing of sched tracepoints fields",
                .func = perf_evsel__tp_sched_test,

                .desc = "Generate and check syscalls:sys_enter_open event fields",
                .func = test__syscall_open_tp_fields,
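/*
 * perf_test__matches() decides whether test number @curr was selected on
 * the command line, accepting either a test number or a fragment of its
 * description; __cmd_test() then runs every selected entry and reports
 * "Ok" or "FAILED!" per test.
 */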
static bool perf_test__matches(int curr, int argc, const char *argv[])

        for (i = 0; i < argc; ++i) {

                long nr = strtoul(argv[i], &end, 10);

static int __cmd_test(int argc, const char *argv[])

        while (tests[i].func) {
                int curr = i++, err;

                if (!perf_test__matches(curr, argc, argv))

                err = tests[curr].func();

                pr_info(" %s\n", err ? "FAILED!\n" : "Ok");

static int perf_test__list(int argc, const char **argv)

        while (tests[i].func) {

                if (argc > 1 && !strstr(tests[curr].desc, argv[1]))

                pr_info("%2d: %s\n", i, tests[curr].desc);

        const char * const test_usage[] = {
        "perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]",

        const struct option test_options[] = {
                   "be more verbose (show symbol address, etc)"),

        argc = parse_options(argc, argv, test_options, test_usage, 0);
        if (argc >= 1 && !strcmp(argv[0], "list"))
                return perf_test__list(argc, argv);

        return __cmd_test(argc, argv);