Linux Kernel  3.7.1
builtin-test.c
1 /*
2  * builtin-test.c
3  *
4  * Builtin regression testing command: ever-growing number of sanity tests
5  */
6 #include "builtin.h"
7 
8 #include "util/cache.h"
9 #include "util/debug.h"
10 #include "util/debugfs.h"
11 #include "util/evlist.h"
12 #include "util/parse-options.h"
13 #include "util/parse-events.h"
14 #include "util/symbol.h"
15 #include "util/thread_map.h"
16 #include "util/pmu.h"
17 #include "event-parse.h"
18 #include <linux/hw_breakpoint.h>
19 
20 #include <sys/mman.h>
21 
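 /*
  * Symbol filter used when loading vmlinux below: mark each symbol as
  * visited via its symbol__priv() area and keep it (return 0).
  */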
22 static int vmlinux_matches_kallsyms_filter(struct map *map __maybe_unused,
23  struct symbol *sym)
24 {
25  bool *visited = symbol__priv(sym);
26  *visited = true;
27  return 0;
28 }
29 
30 static int test__vmlinux_matches_kallsyms(void)
31 {
32  int err = -1;
33  struct rb_node *nd;
34  struct symbol *sym;
35  struct map *kallsyms_map, *vmlinux_map;
36  struct machine kallsyms, vmlinux;
37  enum map_type type = MAP__FUNCTION;
38  long page_size = sysconf(_SC_PAGE_SIZE);
39  struct ref_reloc_sym ref_reloc_sym = { .name = "_stext", };
40 
41  /*
42  * Step 1:
43  *
44  * Init the machines that will hold the kernel and the modules obtained from
45  * both vmlinux + .ko files and from /proc/kallsyms, split by modules.
46  */
47  machine__init(&kallsyms, "", HOST_KERNEL_ID);
48  machine__init(&vmlinux, "", HOST_KERNEL_ID);
49 
50  /*
51  * Step 2:
52  *
53  * Create the kernel maps for kallsyms and the DSO where we will then
54  * load /proc/kallsyms. Also create the modules maps from /proc/modules
55  * and find the .ko files that match them in /lib/modules/`uname -r`/.
56  */
57  if (machine__create_kernel_maps(&kallsyms) < 0) {
58  pr_debug("machine__create_kernel_maps ");
59  return -1;
60  }
61 
62  /*
63  * Step 3:
64  *
65  * Load and split /proc/kallsyms into multiple maps, one per module.
66  */
67  if (machine__load_kallsyms(&kallsyms, "/proc/kallsyms", type, NULL) <= 0) {
68  pr_debug("dso__load_kallsyms ");
69  goto out;
70  }
71 
72  /*
73  * Step 4:
74  *
75  * kallsyms will be internally on demand sorted by name so that we can
76  * find the reference relocation symbol, i.e. the symbol we will use
77  * to see if the running kernel was relocated, by checking if it has the
78  * same value in the vmlinux file we load.
79  */
80  kallsyms_map = machine__kernel_map(&kallsyms, type);
81 
82  sym = map__find_symbol_by_name(kallsyms_map, ref_reloc_sym.name, NULL);
83  if (sym == NULL) {
84  pr_debug("dso__find_symbol_by_name ");
85  goto out;
86  }
87 
88  ref_reloc_sym.addr = sym->start;
89 
90  /*
91  * Step 5:
92  *
93  * Now repeat step 2, this time for the vmlinux file we'll auto-locate.
94  */
95  if (machine__create_kernel_maps(&vmlinux) < 0) {
96  pr_debug("machine__create_kernel_maps ");
97  goto out;
98  }
99 
100  vmlinux_map = machine__kernel_map(&vmlinux, type);
101  map__kmap(vmlinux_map)->ref_reloc_sym = &ref_reloc_sym;
102 
103  /*
104  * Step 6:
105  *
106  * Locate a vmlinux file in the vmlinux path that has a buildid that
107  * matches the one of the running kernel.
108  *
109  * While doing that, look for the ref reloc symbol; if we find it
110  * we'll have its ref_reloc_symbol.unrelocated_addr and then
111  * maps__reloc_vmlinux will notice and set proper ->[un]map_ip routines
112  * to fix up the symbols.
113  */
114  if (machine__load_vmlinux_path(&vmlinux, type,
115  vmlinux_matches_kallsyms_filter) <= 0) {
116  pr_debug("machine__load_vmlinux_path ");
117  goto out;
118  }
119 
120  err = 0;
121  /*
122  * Step 7:
123  *
124  * Now look at the symbols in the vmlinux DSO and check if we find all of them
125  * in the kallsyms dso. For the ones that are in both, check their names and
126  * end addresses too.
127  */
128  for (nd = rb_first(&vmlinux_map->dso->symbols[type]); nd; nd = rb_next(nd)) {
129  struct symbol *pair, *first_pair;
130  bool backwards = true;
131 
132  sym = rb_entry(nd, struct symbol, rb_node);
133 
134  if (sym->start == sym->end)
135  continue;
136 
137  first_pair = machine__find_kernel_symbol(&kallsyms, type, sym->start, NULL, NULL);
138  pair = first_pair;
139 
140  if (pair && pair->start == sym->start) {
141 next_pair:
142  if (strcmp(sym->name, pair->name) == 0) {
143  /*
144  * kallsyms doesn't have the symbol end, so we
145  * set it by using the next symbol's start - 1.
146  * In some cases this is off by up to a page;
147  * trace_kmalloc, when this code was being
148  * developed, was one such example, 2106 bytes
149  * off the real size. More than that and we
150  * _really_ have a problem.
151  */
152  s64 skew = sym->end - pair->end;
153  if (llabs(skew) < page_size)
154  continue;
155 
156  pr_debug("%#" PRIx64 ": diff end addr for %s v: %#" PRIx64 " k: %#" PRIx64 "\n",
157  sym->start, sym->name, sym->end, pair->end);
158  } else {
159  struct rb_node *nnd;
160 detour:
161  nnd = backwards ? rb_prev(&pair->rb_node) :
162  rb_next(&pair->rb_node);
163  if (nnd) {
164  struct symbol *next = rb_entry(nnd, struct symbol, rb_node);
165 
166  if (next->start == sym->start) {
167  pair = next;
168  goto next_pair;
169  }
170  }
171 
172  if (backwards) {
173  backwards = false;
174  pair = first_pair;
175  goto detour;
176  }
177 
178  pr_debug("%#" PRIx64 ": diff name v: %s k: %s\n",
179  sym->start, sym->name, pair->name);
180  }
181  } else
182  pr_debug("%#" PRIx64 ": %s not on kallsyms\n", sym->start, sym->name);
183 
184  err = -1;
185  }
186 
187  if (!verbose)
188  goto out;
189 
190  pr_info("Maps only in vmlinux:\n");
191 
192  for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) {
193  struct map *pos = rb_entry(nd, struct map, rb_node), *pair;
194  /*
195  * If it is the kernel, kallsyms is always "[kernel.kallsyms]", while
196  * the vmlinux map will have the path of the vmlinux file being used,
197  * so use the short name, less descriptive but the same ("[kernel]" in
198  * both cases).
199  */
200  pair = map_groups__find_by_name(&kallsyms.kmaps, type,
201  (pos->dso->kernel ?
202  pos->dso->short_name :
203  pos->dso->name));
204  if (pair)
205  pair->priv = 1;
206  else
207  map__fprintf(pos, stderr);
208  }
209 
210  pr_info("Maps in vmlinux with a different name in kallsyms:\n");
211 
212  for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) {
213  struct map *pos = rb_entry(nd, struct map, rb_node), *pair;
214 
215  pair = map_groups__find(&kallsyms.kmaps, type, pos->start);
216  if (pair == NULL || pair->priv)
217  continue;
218 
219  if (pair->start == pos->start) {
220  pair->priv = 1;
221  pr_info(" %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s in kallsyms as",
222  pos->start, pos->end, pos->pgoff, pos->dso->name);
223  if (pos->pgoff != pair->pgoff || pos->end != pair->end)
224  pr_info(": \n*%" PRIx64 "-%" PRIx64 " %" PRIx64 "",
225  pair->start, pair->end, pair->pgoff);
226  pr_info(" %s\n", pair->dso->name);
227  pair->priv = 1;
228  }
229  }
230 
231  pr_info("Maps only in kallsyms:\n");
232 
233  for (nd = rb_first(&kallsyms.kmaps.maps[type]);
234  nd; nd = rb_next(nd)) {
235  struct map *pos = rb_entry(nd, struct map, rb_node);
236 
237  if (!pos->priv)
238  map__fprintf(pos, stderr);
239  }
240 out:
241  return err;
242 }
243 
244 #include "util/cpumap.h"
245 #include "util/evsel.h"
246 #include <sys/types.h>
247 
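 /*
  * Read the tracepoint id for a syscall event from debugfs,
  * i.e. <tracing_events_path>/syscalls/<evname>/id; returns the id or -1.
  */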
248 static int trace_event__id(const char *evname)
249 {
250  char *filename;
251  int err = -1, fd;
252 
253  if (asprintf(&filename,
254  "%s/syscalls/%s/id",
255  tracing_events_path, evname) < 0)
256  return -1;
257 
258  fd = open(filename, O_RDONLY);
259  if (fd >= 0) {
260  char id[16];
261  if (read(fd, id, sizeof(id)) > 0)
262  err = atoi(id);
263  close(fd);
264  }
265 
266  free(filename);
267  return err;
268 }
269 
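 /*
  * Count a known number of open() calls on the current thread via the
  * syscalls:sys_enter_open tracepoint and check that the counter matches.
  */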
270 static int test__open_syscall_event(void)
271 {
272  int err = -1, fd;
273  struct thread_map *threads;
274  struct perf_evsel *evsel;
275  struct perf_event_attr attr;
276  unsigned int nr_open_calls = 111, i;
277  int id = trace_event__id("sys_enter_open");
278 
279  if (id < 0) {
280  pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
281  return -1;
282  }
283 
284  threads = thread_map__new(-1, getpid(), UINT_MAX);
285  if (threads == NULL) {
286  pr_debug("thread_map__new\n");
287  return -1;
288  }
289 
290  memset(&attr, 0, sizeof(attr));
291  attr.type = PERF_TYPE_TRACEPOINT;
292  attr.config = id;
293  evsel = perf_evsel__new(&attr, 0);
294  if (evsel == NULL) {
295  pr_debug("perf_evsel__new\n");
296  goto out_thread_map_delete;
297  }
298 
299  if (perf_evsel__open_per_thread(evsel, threads) < 0) {
300  pr_debug("failed to open counter: %s, "
301  "tweak /proc/sys/kernel/perf_event_paranoid?\n",
302  strerror(errno));
303  goto out_evsel_delete;
304  }
305 
306  for (i = 0; i < nr_open_calls; ++i) {
307  fd = open("/etc/passwd", O_RDONLY);
308  close(fd);
309  }
310 
311  if (perf_evsel__read_on_cpu(evsel, 0, 0) < 0) {
312  pr_debug("perf_evsel__read_on_cpu\n");
313  goto out_close_fd;
314  }
315 
316  if (evsel->counts->cpu[0].val != nr_open_calls) {
317  pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls, got %" PRIu64 "\n",
318  nr_open_calls, evsel->counts->cpu[0].val);
319  goto out_close_fd;
320  }
321 
322  err = 0;
323 out_close_fd:
324  perf_evsel__close_fd(evsel, 1, threads->nr);
325 out_evsel_delete:
326  perf_evsel__delete(evsel);
327 out_thread_map_delete:
328  thread_map__delete(threads);
329  return err;
330 }
331 
332 #include <sched.h>
333 
334 static int test__open_syscall_event_on_all_cpus(void)
335 {
336  int err = -1, fd, cpu;
337  struct thread_map *threads;
338  struct cpu_map *cpus;
339  struct perf_evsel *evsel;
340  struct perf_event_attr attr;
341  unsigned int nr_open_calls = 111, i;
342  cpu_set_t cpu_set;
343  int id = trace_event__id("sys_enter_open");
344 
345  if (id < 0) {
346  pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
347  return -1;
348  }
349 
350  threads = thread_map__new(-1, getpid(), UINT_MAX);
351  if (threads == NULL) {
352  pr_debug("thread_map__new\n");
353  return -1;
354  }
355 
356  cpus = cpu_map__new(NULL);
357  if (cpus == NULL) {
358  pr_debug("cpu_map__new\n");
359  goto out_thread_map_delete;
360  }
361 
362 
363  CPU_ZERO(&cpu_set);
364 
365  memset(&attr, 0, sizeof(attr));
366  attr.type = PERF_TYPE_TRACEPOINT;
367  attr.config = id;
368  evsel = perf_evsel__new(&attr, 0);
369  if (evsel == NULL) {
370  pr_debug("perf_evsel__new\n");
371  goto out_thread_map_delete;
372  }
373 
374  if (perf_evsel__open(evsel, cpus, threads) < 0) {
375  pr_debug("failed to open counter: %s, "
376  "tweak /proc/sys/kernel/perf_event_paranoid?\n",
377  strerror(errno));
378  goto out_evsel_delete;
379  }
380 
381  for (cpu = 0; cpu < cpus->nr; ++cpu) {
382  unsigned int ncalls = nr_open_calls + cpu;
383  /*
384  * XXX eventually lift this restriction in a way that
385  * keeps perf building on older glibc installations
386  * without CPU_ALLOC. 1024 cpus in 2010 still seems
387  * a reasonable upper limit tho :-)
388  */
389  if (cpus->map[cpu] >= CPU_SETSIZE) {
390  pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
391  continue;
392  }
393 
394  CPU_SET(cpus->map[cpu], &cpu_set);
395  if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
396  pr_debug("sched_setaffinity() failed on CPU %d: %s ",
397  cpus->map[cpu],
398  strerror(errno));
399  goto out_close_fd;
400  }
401  for (i = 0; i < ncalls; ++i) {
402  fd = open("/etc/passwd", O_RDONLY);
403  close(fd);
404  }
405  CPU_CLR(cpus->map[cpu], &cpu_set);
406  }
407 
408  /*
409  * Here we need to explicitly preallocate the counts, as if
410  * we use the auto allocation it will allocate just for 1 cpu,
411  * as we start with cpu 0.
412  */
413  if (perf_evsel__alloc_counts(evsel, cpus->nr) < 0) {
414  pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
415  goto out_close_fd;
416  }
417 
418  err = 0;
419 
420  for (cpu = 0; cpu < cpus->nr; ++cpu) {
421  unsigned int expected;
422 
423  if (cpus->map[cpu] >= CPU_SETSIZE)
424  continue;
425 
426  if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
427  pr_debug("perf_evsel__read_on_cpu\n");
428  err = -1;
429  break;
430  }
431 
432  expected = nr_open_calls + cpu;
433  if (evsel->counts->cpu[cpu].val != expected) {
434  pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
435  expected, cpus->map[cpu], evsel->counts->cpu[cpu].val);
436  err = -1;
437  }
438  }
439 
440 out_close_fd:
441  perf_evsel__close_fd(evsel, 1, threads->nr);
442 out_evsel_delete:
443  perf_evsel__delete(evsel);
444 out_thread_map_delete:
445  thread_map__delete(threads);
446  return err;
447 }
448 
449 /*
450  * This test will generate random numbers of calls to some getpid syscalls,
451  * then establish an mmap for a group of events that are created to monitor
452  * the syscalls.
453  *
454  * It will receive the events, using mmap, and use the PERF_SAMPLE_ID generated
455  * sample.id field to map each one back to its respective perf_evsel instance.
456  *
457  * Then it checks if the number of syscalls reported as perf events by
458  * the kernel corresponds to the number of syscalls made.
459  */
460 static int test__basic_mmap(void)
461 {
462  int err = -1;
463  union perf_event *event;
464  struct thread_map *threads;
465  struct cpu_map *cpus;
466  struct perf_evlist *evlist;
467  struct perf_event_attr attr = {
468  .type = PERF_TYPE_TRACEPOINT,
469  .read_format = PERF_FORMAT_ID,
470  .sample_type = PERF_SAMPLE_ID,
471  .watermark = 0,
472  };
473  cpu_set_t cpu_set;
474  const char *syscall_names[] = { "getsid", "getppid", "getpgrp",
475  "getpgid", };
476  pid_t (*syscalls[])(void) = { (void *)getsid, getppid, getpgrp,
477  (void*)getpgid };
478 #define nsyscalls ARRAY_SIZE(syscall_names)
479  int ids[nsyscalls];
480  unsigned int nr_events[nsyscalls],
481  expected_nr_events[nsyscalls], i, j;
482  struct perf_evsel *evsels[nsyscalls], *evsel;
483 
484  for (i = 0; i < nsyscalls; ++i) {
485  char name[64];
486 
487  snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]);
488  ids[i] = trace_event__id(name);
489  if (ids[i] < 0) {
490  pr_debug("Is debugfs mounted on /sys/kernel/debug?\n");
491  return -1;
492  }
493  nr_events[i] = 0;
494  expected_nr_events[i] = random() % 257;
495  }
496 
497  threads = thread_map__new(-1, getpid(), UINT_MAX);
498  if (threads == NULL) {
499  pr_debug("thread_map__new\n");
500  return -1;
501  }
502 
503  cpus = cpu_map__new(NULL);
504  if (cpus == NULL) {
505  pr_debug("cpu_map__new\n");
506  goto out_free_threads;
507  }
508 
509  CPU_ZERO(&cpu_set);
510  CPU_SET(cpus->map[0], &cpu_set);
511  sched_setaffinity(0, sizeof(cpu_set), &cpu_set);
512  if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
513  pr_debug("sched_setaffinity() failed on CPU %d: %s ",
514  cpus->map[0], strerror(errno));
515  goto out_free_cpus;
516  }
517 
518  evlist = perf_evlist__new(cpus, threads);
519  if (evlist == NULL) {
520  pr_debug("perf_evlist__new\n");
521  goto out_free_cpus;
522  }
523 
524  /* anonymous union fields, can't be initialized above */
525  attr.wakeup_events = 1;
526  attr.sample_period = 1;
527 
528  for (i = 0; i < nsyscalls; ++i) {
529  attr.config = ids[i];
530  evsels[i] = perf_evsel__new(&attr, i);
531  if (evsels[i] == NULL) {
532  pr_debug("perf_evsel__new\n");
533  goto out_free_evlist;
534  }
535 
536  perf_evlist__add(evlist, evsels[i]);
537 
538  if (perf_evsel__open(evsels[i], cpus, threads) < 0) {
539  pr_debug("failed to open counter: %s, "
540  "tweak /proc/sys/kernel/perf_event_paranoid?\n",
541  strerror(errno));
542  goto out_close_fd;
543  }
544  }
545 
546  if (perf_evlist__mmap(evlist, 128, true) < 0) {
547  pr_debug("failed to mmap events: %d (%s)\n", errno,
548  strerror(errno));
549  goto out_close_fd;
550  }
551 
552  for (i = 0; i < nsyscalls; ++i)
553  for (j = 0; j < expected_nr_events[i]; ++j) {
554  int foo = syscalls[i]();
555  ++foo;
556  }
557 
558  while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) {
559  struct perf_sample sample;
560 
561  if (event->header.type != PERF_RECORD_SAMPLE) {
562  pr_debug("unexpected %s event\n",
563  perf_event__name(event->header.type));
564  goto out_munmap;
565  }
566 
567  err = perf_evlist__parse_sample(evlist, event, &sample);
568  if (err) {
569  pr_err("Can't parse sample, err = %d\n", err);
570  goto out_munmap;
571  }
572 
573  evsel = perf_evlist__id2evsel(evlist, sample.id);
574  if (evsel == NULL) {
575  pr_debug("event with id %" PRIu64
576  " doesn't map to an evsel\n", sample.id);
577  goto out_munmap;
578  }
579  nr_events[evsel->idx]++;
580  }
581 
582  list_for_each_entry(evsel, &evlist->entries, node) {
583  if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) {
584  pr_debug("expected %d %s events, got %d\n",
585  expected_nr_events[evsel->idx],
586  perf_evsel__name(evsel), nr_events[evsel->idx]);
587  goto out_munmap;
588  }
589  }
590 
591  err = 0;
592 out_munmap:
593  perf_evlist__munmap(evlist);
594 out_close_fd:
595  for (i = 0; i < nsyscalls; ++i)
596  perf_evsel__close_fd(evsels[i], 1, threads->nr);
597 out_free_evlist:
598  perf_evlist__delete(evlist);
599 out_free_cpus:
600  cpu_map__delete(cpus);
601 out_free_threads:
602  thread_map__delete(threads);
603  return err;
604 #undef nsyscalls
605 }
606 
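 /*
  * Fill *maskp with the task's affinity mask, reduce it to just the first
  * CPU that is set and return that CPU number, or -1 on error.
  */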
607 static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t *maskp)
608 {
609  int i, cpu = -1, nrcpus = 1024;
610 realloc:
611  CPU_ZERO(maskp);
612 
613  if (sched_getaffinity(pid, sizeof(*maskp), maskp) == -1) {
614  if (errno == EINVAL && nrcpus < (1024 << 8)) {
615  nrcpus = nrcpus << 2;
616  goto realloc;
617  }
618  perror("sched_getaffinity");
619  return -1;
620  }
621 
622  for (i = 0; i < nrcpus; i++) {
623  if (CPU_ISSET(i, maskp)) {
624  if (cpu == -1)
625  cpu = i;
626  else
627  CPU_CLR(i, maskp);
628  }
629  }
630 
631  return cpu;
632 }
633 
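 /*
  * Fork a "sleep 1" workload pinned to one CPU, record it with the default
  * "cycles" evsel plus CPU/TID/TIME samples, and sanity check the resulting
  * PERF_RECORD_* stream: timestamps, cpu/pid/tid and COMM/MMAP/EXIT events.
  */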
634 static int test__PERF_RECORD(void)
635 {
636  struct perf_record_opts opts = {
637  .target = {
638  .uid = UINT_MAX,
639  .uses_mmap = true,
640  },
641  .no_delay = true,
642  .freq = 10,
643  .mmap_pages = 256,
644  };
645  cpu_set_t cpu_mask;
646  size_t cpu_mask_size = sizeof(cpu_mask);
647  struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
648  struct perf_evsel *evsel;
649  struct perf_sample sample;
650  const char *cmd = "sleep";
651  const char *argv[] = { cmd, "1", NULL, };
652  char *bname;
653  u64 prev_time = 0;
654  bool found_cmd_mmap = false,
655  found_libc_mmap = false,
656  found_vdso_mmap = false,
657  found_ld_mmap = false;
658  int err = -1, errs = 0, i, wakeups = 0;
659  u32 cpu;
660  int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, };
661 
662  if (evlist == NULL || argv == NULL) {
663  pr_debug("Not enough memory to create evlist\n");
664  goto out;
665  }
666 
667  /*
668  * We need at least one evsel in the evlist, use the default
669  * one: "cycles".
670  */
671  err = perf_evlist__add_default(evlist);
672  if (err < 0) {
673  pr_debug("Not enough memory to create evsel\n");
674  goto out_delete_evlist;
675  }
676 
677  /*
678  * Create maps of threads and cpus to monitor. In this case
679  * we start with all threads and cpus (-1, -1) but then in
680  * perf_evlist__prepare_workload we'll fill in the only thread
681  * we're monitoring, the one forked there.
682  */
683  err = perf_evlist__create_maps(evlist, &opts.target);
684  if (err < 0) {
685  pr_debug("Not enough memory to create thread/cpu maps\n");
686  goto out_delete_evlist;
687  }
688 
689  /*
690  * Prepare the workload in argv[] to run: it will be forked, and will then wait
691  * for perf_evlist__start_workload() to exec it. This is done this way
692  * so that we have time to open the evlist (calling sys_perf_event_open
693  * on all the fds) and then mmap them.
694  */
695  err = perf_evlist__prepare_workload(evlist, &opts, argv);
696  if (err < 0) {
697  pr_debug("Couldn't run the workload!\n");
698  goto out_delete_evlist;
699  }
700 
701  /*
702  * Config the evsels, setting attr->comm on the first one, etc.
703  */
704  evsel = perf_evlist__first(evlist);
705  evsel->attr.sample_type |= PERF_SAMPLE_CPU;
706  evsel->attr.sample_type |= PERF_SAMPLE_TID;
707  evsel->attr.sample_type |= PERF_SAMPLE_TIME;
708  perf_evlist__config_attrs(evlist, &opts);
709 
710  err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask);
711  if (err < 0) {
712  pr_debug("sched__get_first_possible_cpu: %s\n", strerror(errno));
713  goto out_delete_evlist;
714  }
715 
716  cpu = err;
717 
718  /*
719  * So that we can check perf_sample.cpu on all the samples.
720  */
721  if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, &cpu_mask) < 0) {
722  pr_debug("sched_setaffinity: %s\n", strerror(errno));
723  goto out_delete_evlist;
724  }
725 
726  /*
727  * Call sys_perf_event_open on all the fds on all the evsels,
728  * grouping them if asked to.
729  */
730  err = perf_evlist__open(evlist);
731  if (err < 0) {
732  pr_debug("perf_evlist__open: %s\n", strerror(errno));
733  goto out_delete_evlist;
734  }
735 
736  /*
737  * mmap the first fd on a given CPU and ask for events for the other
738  * fds in the same CPU to be injected in the same mmap ring buffer
739  * (using ioctl(PERF_EVENT_IOC_SET_OUTPUT)).
740  */
741  err = perf_evlist__mmap(evlist, opts.mmap_pages, false);
742  if (err < 0) {
743  pr_debug("perf_evlist__mmap: %s\n", strerror(errno));
744  goto out_delete_evlist;
745  }
746 
747  /*
748  * Now that all is properly set up, enable the events; they will
749  * count just on workload.pid, which is about to start...
750  */
751  perf_evlist__enable(evlist);
752 
753  /*
754  * Now!
755  */
756  perf_evlist__start_workload(evlist);
757 
758  while (1) {
759  int before = total_events;
760 
761  for (i = 0; i < evlist->nr_mmaps; i++) {
762  union perf_event *event;
763 
764  while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
765  const u32 type = event->header.type;
766  const char *name = perf_event__name(type);
767 
768  ++total_events;
769  if (type < PERF_RECORD_MAX)
770  nr_events[type]++;
771 
772  err = perf_evlist__parse_sample(evlist, event, &sample);
773  if (err < 0) {
774  if (verbose)
775  perf_event__fprintf(event, stderr);
776  pr_debug("Couldn't parse sample\n");
777  goto out_err;
778  }
779 
780  if (verbose) {
781  pr_info("%" PRIu64" %d ", sample.time, sample.cpu);
782  perf_event__fprintf(event, stderr);
783  }
784 
785  if (prev_time > sample.time) {
786  pr_debug("%s going backwards in time, prev=%" PRIu64 ", curr=%" PRIu64 "\n",
787  name, prev_time, sample.time);
788  ++errs;
789  }
790 
791  prev_time = sample.time;
792 
793  if (sample.cpu != cpu) {
794  pr_debug("%s with unexpected cpu, expected %d, got %d\n",
795  name, cpu, sample.cpu);
796  ++errs;
797  }
798 
799  if ((pid_t)sample.pid != evlist->workload.pid) {
800  pr_debug("%s with unexpected pid, expected %d, got %d\n",
801  name, evlist->workload.pid, sample.pid);
802  ++errs;
803  }
804 
805  if ((pid_t)sample.tid != evlist->workload.pid) {
806  pr_debug("%s with unexpected tid, expected %d, got %d\n",
807  name, evlist->workload.pid, sample.tid);
808  ++errs;
809  }
810 
811  if ((type == PERF_RECORD_COMM ||
812  type == PERF_RECORD_MMAP ||
813  type == PERF_RECORD_FORK ||
814  type == PERF_RECORD_EXIT) &&
815  (pid_t)event->comm.pid != evlist->workload.pid) {
816  pr_debug("%s with unexpected pid/tid\n", name);
817  ++errs;
818  }
819 
820  if ((type == PERF_RECORD_COMM ||
821  type == PERF_RECORD_MMAP) &&
822  event->comm.pid != event->comm.tid) {
823  pr_debug("%s with different pid/tid!\n", name);
824  ++errs;
825  }
826 
827  switch (type) {
828  case PERF_RECORD_COMM:
829  if (strcmp(event->comm.comm, cmd)) {
830  pr_debug("%s with unexpected comm!\n", name);
831  ++errs;
832  }
833  break;
834  case PERF_RECORD_EXIT:
835  goto found_exit;
836  case PERF_RECORD_MMAP:
837  bname = strrchr(event->mmap.filename, '/');
838  if (bname != NULL) {
839  if (!found_cmd_mmap)
840  found_cmd_mmap = !strcmp(bname + 1, cmd);
841  if (!found_libc_mmap)
842  found_libc_mmap = !strncmp(bname + 1, "libc", 4);
843  if (!found_ld_mmap)
844  found_ld_mmap = !strncmp(bname + 1, "ld", 2);
845  } else if (!found_vdso_mmap)
846  found_vdso_mmap = !strcmp(event->mmap.filename, "[vdso]");
847  break;
848 
849  case PERF_RECORD_SAMPLE:
850  /* Just ignore samples for now */
851  break;
852  default:
853  pr_debug("Unexpected perf_event->header.type %d!\n",
854  type);
855  ++errs;
856  }
857  }
858  }
859 
860  /*
861  * We don't use poll here because at least at 3.1 times the
862  * PERF_RECORD_{!SAMPLE} events don't honour
863  * perf_event_attr.wakeup_events, just PERF_EVENT_SAMPLE does.
864  */
865  if (total_events == before && false)
866  poll(evlist->pollfd, evlist->nr_fds, -1);
867 
868  sleep(1);
869  if (++wakeups > 5) {
870  pr_debug("No PERF_RECORD_EXIT event!\n");
871  break;
872  }
873  }
874 
875 found_exit:
876  if (nr_events[PERF_RECORD_COMM] > 1) {
877  pr_debug("Excessive number of PERF_RECORD_COMM events!\n");
878  ++errs;
879  }
880 
881  if (nr_events[PERF_RECORD_COMM] == 0) {
882  pr_debug("Missing PERF_RECORD_COMM for %s!\n", cmd);
883  ++errs;
884  }
885 
886  if (!found_cmd_mmap) {
887  pr_debug("PERF_RECORD_MMAP for %s missing!\n", cmd);
888  ++errs;
889  }
890 
891  if (!found_libc_mmap) {
892  pr_debug("PERF_RECORD_MMAP for %s missing!\n", "libc");
893  ++errs;
894  }
895 
896  if (!found_ld_mmap) {
897  pr_debug("PERF_RECORD_MMAP for %s missing!\n", "ld");
898  ++errs;
899  }
900 
901  if (!found_vdso_mmap) {
902  pr_debug("PERF_RECORD_MMAP for %s missing!\n", "[vdso]");
903  ++errs;
904  }
905 out_err:
906  perf_evlist__munmap(evlist);
907 out_delete_evlist:
908  perf_evlist__delete(evlist);
909 out:
910  return (err < 0 || errs > 0) ? -1 : 0;
911 }
912 
913 
914 #if defined(__x86_64__) || defined(__i386__)
915 
916 #define barrier() asm volatile("" ::: "memory")
917 
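 /*
  * Read a performance counter (rdpmc) or the TSC (rdtsc) directly from user
  * space; both combine EDX:EAX into a 64-bit value.
  */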
918 static u64 rdpmc(unsigned int counter)
919 {
920  unsigned int low, high;
921 
922  asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));
923 
924  return low | ((u64)high) << 32;
925 }
926 
927 static u64 rdtsc(void)
928 {
929  unsigned int low, high;
930 
931  asm volatile("rdtsc" : "=a" (low), "=d" (high));
932 
933  return low | ((u64)high) << 32;
934 }
935 
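 /*
  * Read the current counter value from the mmap'ed perf_event_mmap_page,
  * retrying while pc->lock changes (seqlock style) and scaling the count if
  * the event was not running the whole time it was enabled.
  */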
936 static u64 mmap_read_self(void *addr)
937 {
938  struct perf_event_mmap_page *pc = addr;
939  u32 seq, idx, time_mult = 0, time_shift = 0;
940  u64 count, cyc = 0, time_offset = 0, enabled, running, delta;
941 
942  do {
943  seq = pc->lock;
944  barrier();
945 
946  enabled = pc->time_enabled;
947  running = pc->time_running;
948 
949  if (enabled != running) {
950  cyc = rdtsc();
951  time_mult = pc->time_mult;
952  time_shift = pc->time_shift;
953  time_offset = pc->time_offset;
954  }
955 
956  idx = pc->index;
957  count = pc->offset;
958  if (idx)
959  count += rdpmc(idx - 1);
960 
961  barrier();
962  } while (pc->lock != seq);
963 
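 /*
  * If the event was not scheduled in the whole time it was enabled
  * (enabled != running), extend both by the TSC-derived delta and scale
  * the raw count by enabled/running. Rough sketch (numbers made up, not
  * from this file): ignoring the delta, count = 500, running = 1000,
  * enabled = 2000 would report ~1000.
  */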
964  if (enabled != running) {
965  u64 quot, rem;
966 
967  quot = (cyc >> time_shift);
968  rem = cyc & ((1 << time_shift) - 1);
969  delta = time_offset + quot * time_mult +
970  ((rem * time_mult) >> time_shift);
971 
972  enabled += delta;
973  if (idx)
974  running += delta;
975 
976  quot = count / running;
977  rem = count % running;
978  count = quot * enabled + (rem * enabled) / running;
979  }
980 
981  return count;
982 }
983 
984 /*
985  * If the RDPMC instruction faults then signal this back to the test parent task:
986  */
987 static void segfault_handler(int sig __maybe_unused,
988  siginfo_t *info __maybe_unused,
989  void *uc __maybe_unused)
990 {
991  exit(-1);
992 }
993 
994 static int __test__rdpmc(void)
995 {
996  long page_size = sysconf(_SC_PAGE_SIZE);
997  volatile int tmp = 0;
998  u64 i, loops = 1000;
999  int n;
1000  int fd;
1001  void *addr;
1002  struct perf_event_attr attr = {
1003  .type = PERF_TYPE_HARDWARE,
1004  .config = PERF_COUNT_HW_INSTRUCTIONS,
1005  .exclude_kernel = 1,
1006  };
1007  u64 delta_sum = 0;
1008  struct sigaction sa;
1009 
1010  sigfillset(&sa.sa_mask);
1011  sa.sa_sigaction = segfault_handler;
1012  sigaction(SIGSEGV, &sa, NULL);
1013 
1014  fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
1015  if (fd < 0) {
1016  pr_err("Error: sys_perf_event_open() syscall returned "
1017  "with %d (%s)\n", fd, strerror(errno));
1018  return -1;
1019  }
1020 
1021  addr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0);
1022  if (addr == (void *)(-1)) {
1023  pr_err("Error: mmap() syscall returned with (%s)\n",
1024  strerror(errno));
1025  goto out_close;
1026  }
1027 
1028  for (n = 0; n < 6; n++) {
1029  u64 stamp, now, delta;
1030 
1031  stamp = mmap_read_self(addr);
1032 
1033  for (i = 0; i < loops; i++)
1034  tmp++;
1035 
1036  now = mmap_read_self(addr);
1037  loops *= 10;
1038 
1039  delta = now - stamp;
1040  pr_debug("%14d: %14Lu\n", n, (long long)delta);
1041 
1042  delta_sum += delta;
1043  }
1044 
1045  munmap(addr, page_size);
1046  pr_debug(" ");
1047 out_close:
1048  close(fd);
1049 
1050  if (!delta_sum)
1051  return -1;
1052 
1053  return 0;
1054 }
1055 
1056 static int test__rdpmc(void)
1057 {
1058  int status = 0;
1059  int wret = 0;
1060  int ret;
1061  int pid;
1062 
1063  pid = fork();
1064  if (pid < 0)
1065  return -1;
1066 
1067  if (!pid) {
1068  ret = __test__rdpmc();
1069 
1070  exit(ret);
1071  }
1072 
1073  wret = waitpid(pid, &status, 0);
1074  if (wret < 0 || status)
1075  return -1;
1076 
1077  return 0;
1078 }
1079 
1080 #endif
1081 
1082 static int test__perf_pmu(void)
1083 {
1084  return perf_pmu__test();
1085 }
1086 
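 /*
  * Build every valid hw-cache event name, parse them all into one evlist,
  * then walk the evlist and check that each evsel's synthesized name matches
  * the name that was parsed.
  */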
1087 static int perf_evsel__roundtrip_cache_name_test(void)
1088 {
1089  char name[128];
1090  int type, op, err = 0, ret = 0, i, idx;
1091  struct perf_evsel *evsel;
1092  struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
1093 
1094  if (evlist == NULL)
1095  return -ENOMEM;
1096 
1097  for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
1098  for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
1099  /* skip invalid cache type */
1100  if (!perf_evsel__is_cache_op_valid(type, op))
1101  continue;
1102 
1103  for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
1104  __perf_evsel__hw_cache_type_op_res_name(type, op, i,
1105  name, sizeof(name));
1106  err = parse_events(evlist, name, 0);
1107  if (err)
1108  ret = err;
1109  }
1110  }
1111  }
1112 
1113  idx = 0;
1114  evsel = perf_evlist__first(evlist);
1115 
1116  for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
1117  for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
1118  /* skip invalid cache type */
1119  if (!perf_evsel__is_cache_op_valid(type, op))
1120  continue;
1121 
1122  for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
1123  __perf_evsel__hw_cache_type_op_res_name(type, op, i,
1124  name, sizeof(name));
1125  if (evsel->idx != idx)
1126  continue;
1127 
1128  ++idx;
1129 
1130  if (strcmp(perf_evsel__name(evsel), name)) {
1131  pr_debug("%s != %s\n", perf_evsel__name(evsel), name);
1132  ret = -1;
1133  }
1134 
1135  evsel = perf_evsel__next(evsel);
1136  }
1137  }
1138  }
1139 
1140  perf_evlist__delete(evlist);
1141  return ret;
1142 }
1143 
1144 static int __perf_evsel__name_array_test(const char *names[], int nr_names)
1145 {
1146  int i, err;
1147  struct perf_evsel *evsel;
1148  struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
1149 
1150  if (evlist == NULL)
1151  return -ENOMEM;
1152 
1153  for (i = 0; i < nr_names; ++i) {
1154  err = parse_events(evlist, names[i], 0);
1155  if (err) {
1156  pr_debug("failed to parse event '%s', err %d\n",
1157  names[i], err);
1158  goto out_delete_evlist;
1159  }
1160  }
1161 
1162  err = 0;
1163  list_for_each_entry(evsel, &evlist->entries, node) {
1164  if (strcmp(perf_evsel__name(evsel), names[evsel->idx])) {
1165  --err;
1166  pr_debug("%s != %s\n", perf_evsel__name(evsel), names[evsel->idx]);
1167  }
1168  }
1169 
1170 out_delete_evlist:
1171  perf_evlist__delete(evlist);
1172  return err;
1173 }
1174 
1175 #define perf_evsel__name_array_test(names) \
1176  __perf_evsel__name_array_test(names, ARRAY_SIZE(names))
1177 
1178 static int perf_evsel__roundtrip_name_test(void)
1179 {
1180  int err = 0, ret = 0;
1181 
1182  err = perf_evsel__name_array_test(perf_evsel__hw_names);
1183  if (err)
1184  ret = err;
1185 
1186  err = perf_evsel__name_array_test(perf_evsel__sw_names);
1187  if (err)
1188  ret = err;
1189 
1190  err = perf_evsel__roundtrip_cache_name_test();
1191  if (err)
1192  ret = err;
1193 
1194  return ret;
1195 }
1196 
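 /*
  * Check that a tracepoint format field exists and has the expected size and
  * signedness.
  */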
1197 static int perf_evsel__test_field(struct perf_evsel *evsel, const char *name,
1198  int size, bool should_be_signed)
1199 {
1200  struct format_field *field = perf_evsel__field(evsel, name);
1201  int is_signed;
1202  int ret = 0;
1203 
1204  if (field == NULL) {
1205  pr_debug("%s: \"%s\" field not found!\n", evsel->name, name);
1206  return -1;
1207  }
1208 
1209  is_signed = !!(field->flags & FIELD_IS_SIGNED);
1210  if (should_be_signed && !is_signed) {
1211  pr_debug("%s: \"%s\" signedness(%d) is wrong, should be %d\n",
1212  evsel->name, name, is_signed, should_be_signed);
1213  ret = -1;
1214  }
1215 
1216  if (field->size != size) {
1217  pr_debug("%s: \"%s\" size (%d) should be %d!\n",
1218  evsel->name, name, field->size, size);
1219  ret = -1;
1220  }
1221 
1222  return ret;
1223 }
1224 
1225 static int perf_evsel__tp_sched_test(void)
1226 {
1227  struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch", 0);
1228  int ret = 0;
1229 
1230  if (evsel == NULL) {
1231  pr_debug("perf_evsel__new\n");
1232  return -1;
1233  }
1234 
1235  if (perf_evsel__test_field(evsel, "prev_comm", 16, true))
1236  ret = -1;
1237 
1238  if (perf_evsel__test_field(evsel, "prev_pid", 4, true))
1239  ret = -1;
1240 
1241  if (perf_evsel__test_field(evsel, "prev_prio", 4, true))
1242  ret = -1;
1243 
1244  if (perf_evsel__test_field(evsel, "prev_state", 8, true))
1245  ret = -1;
1246 
1247  if (perf_evsel__test_field(evsel, "next_comm", 16, true))
1248  ret = -1;
1249 
1250  if (perf_evsel__test_field(evsel, "next_pid", 4, true))
1251  ret = -1;
1252 
1253  if (perf_evsel__test_field(evsel, "next_prio", 4, true))
1254  ret = -1;
1255 
1256  perf_evsel__delete(evsel);
1257 
1258  evsel = perf_evsel__newtp("sched", "sched_wakeup", 0);
1259 
1260  if (perf_evsel__test_field(evsel, "comm", 16, true))
1261  ret = -1;
1262 
1263  if (perf_evsel__test_field(evsel, "pid", 4, true))
1264  ret = -1;
1265 
1266  if (perf_evsel__test_field(evsel, "prio", 4, true))
1267  ret = -1;
1268 
1269  if (perf_evsel__test_field(evsel, "success", 4, true))
1270  ret = -1;
1271 
1272  if (perf_evsel__test_field(evsel, "target_cpu", 4, true))
1273  ret = -1;
1274 
1275  return ret;
1276 }
1277 
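 /*
  * Open /etc/passwd with O_RDONLY | O_DIRECTORY (the open itself is expected
  * to fail, but the syscalls:sys_enter_open tracepoint still fires) and check
  * that the "flags" field in the resulting sample matches what was passed in.
  */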
1278 static int test__syscall_open_tp_fields(void)
1279 {
1280  struct perf_record_opts opts = {
1281  .target = {
1282  .uid = UINT_MAX,
1283  .uses_mmap = true,
1284  },
1285  .no_delay = true,
1286  .freq = 1,
1287  .mmap_pages = 256,
1288  .raw_samples = true,
1289  };
1290  const char *filename = "/etc/passwd";
1291  int flags = O_RDONLY | O_DIRECTORY;
1292  struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
1293  struct perf_evsel *evsel;
1294  int err = -1, i, nr_events = 0, nr_polls = 0;
1295 
1296  if (evlist == NULL) {
1297  pr_debug("%s: perf_evlist__new\n", __func__);
1298  goto out;
1299  }
1300 
1301  evsel = perf_evsel__newtp("syscalls", "sys_enter_open", 0);
1302  if (evsel == NULL) {
1303  pr_debug("%s: perf_evsel__newtp\n", __func__);
1304  goto out_delete_evlist;
1305  }
1306 
1307  perf_evlist__add(evlist, evsel);
1308 
1309  err = perf_evlist__create_maps(evlist, &opts.target);
1310  if (err < 0) {
1311  pr_debug("%s: perf_evlist__create_maps\n", __func__);
1312  goto out_delete_evlist;
1313  }
1314 
1315  perf_evsel__config(evsel, &opts, evsel);
1316 
1317  evlist->threads->map[0] = getpid();
1318 
1319  err = perf_evlist__open(evlist);
1320  if (err < 0) {
1321  pr_debug("perf_evlist__open: %s\n", strerror(errno));
1322  goto out_delete_evlist;
1323  }
1324 
1325  err = perf_evlist__mmap(evlist, UINT_MAX, false);
1326  if (err < 0) {
1327  pr_debug("perf_evlist__mmap: %s\n", strerror(errno));
1328  goto out_delete_evlist;
1329  }
1330 
1331  perf_evlist__enable(evlist);
1332 
1333  /*
1334  * Generate the event:
1335  */
1336  open(filename, flags);
1337 
1338  while (1) {
1339  int before = nr_events;
1340 
1341  for (i = 0; i < evlist->nr_mmaps; i++) {
1342  union perf_event *event;
1343 
1344  while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
1345  const u32 type = event->header.type;
1346  int tp_flags;
1347  struct perf_sample sample;
1348 
1349  ++nr_events;
1350 
1351  if (type != PERF_RECORD_SAMPLE)
1352  continue;
1353 
1354  err = perf_evsel__parse_sample(evsel, event, &sample);
1355  if (err) {
1356  pr_err("Can't parse sample, err = %d\n", err);
1357  goto out_munmap;
1358  }
1359 
1360  tp_flags = perf_evsel__intval(evsel, &sample, "flags");
1361 
1362  if (flags != tp_flags) {
1363  pr_debug("%s: Expected flags=%#x, got %#x\n",
1364  __func__, flags, tp_flags);
1365  goto out_munmap;
1366  }
1367 
1368  goto out_ok;
1369  }
1370  }
1371 
1372  if (nr_events == before)
1373  poll(evlist->pollfd, evlist->nr_fds, 10);
1374 
1375  if (++nr_polls > 5) {
1376  pr_debug("%s: no events!\n", __func__);
1377  goto out_munmap;
1378  }
1379  }
1380 out_ok:
1381  err = 0;
1382 out_munmap:
1383  perf_evlist__munmap(evlist);
1384 out_delete_evlist:
1385  perf_evlist__delete(evlist);
1386 out:
1387  return err;
1388 }
1389 
1390 static struct test {
1391  const char *desc;
1392  int (*func)(void);
1393 } tests[] = {
1394  {
1395  .desc = "vmlinux symtab matches kallsyms",
1396  .func = test__vmlinux_matches_kallsyms,
1397  },
1398  {
1399  .desc = "detect open syscall event",
1400  .func = test__open_syscall_event,
1401  },
1402  {
1403  .desc = "detect open syscall event on all cpus",
1404  .func = test__open_syscall_event_on_all_cpus,
1405  },
1406  {
1407  .desc = "read samples using the mmap interface",
1408  .func = test__basic_mmap,
1409  },
1410  {
1411  .desc = "parse events tests",
1412  .func = parse_events__test,
1413  },
1414 #if defined(__x86_64__) || defined(__i386__)
1415  {
1416  .desc = "x86 rdpmc test",
1417  .func = test__rdpmc,
1418  },
1419 #endif
1420  {
1421  .desc = "Validate PERF_RECORD_* events & perf_sample fields",
1422  .func = test__PERF_RECORD,
1423  },
1424  {
1425  .desc = "Test perf pmu format parsing",
1426  .func = test__perf_pmu,
1427  },
1428  {
1429  .desc = "Test dso data interface",
1430  .func = dso__test_data,
1431  },
1432  {
1433  .desc = "roundtrip evsel->name check",
1434  .func = perf_evsel__roundtrip_name_test,
1435  },
1436  {
1437  .desc = "Check parsing of sched tracepoints fields",
1438  .func = perf_evsel__tp_sched_test,
1439  },
1440  {
1441  .desc = "Generate and check syscalls:sys_enter_open event fields",
1442  .func = test__syscall_open_tp_fields,
1443  },
1444  {
1445  .func = NULL,
1446  },
1447 };
1448 
1449 static bool perf_test__matches(int curr, int argc, const char *argv[])
1450 {
1451  int i;
1452 
1453  if (argc == 0)
1454  return true;
1455 
1456  for (i = 0; i < argc; ++i) {
1457  char *end;
1458  long nr = strtoul(argv[i], &end, 10);
1459 
1460  if (*end == '\0') {
1461  if (nr == curr + 1)
1462  return true;
1463  continue;
1464  }
1465 
1466  if (strstr(tests[curr].desc, argv[i]))
1467  return true;
1468  }
1469 
1470  return false;
1471 }
1472 
1473 static int __cmd_test(int argc, const char *argv[])
1474 {
1475  int i = 0;
1476 
1477  while (tests[i].func) {
1478  int curr = i++, err;
1479 
1480  if (!perf_test__matches(curr, argc, argv))
1481  continue;
1482 
1483  pr_info("%2d: %s:", i, tests[curr].desc);
1484  pr_debug("\n--- start ---\n");
1485  err = tests[curr].func();
1486  pr_debug("---- end ----\n%s:", tests[curr].desc);
1487  pr_info(" %s\n", err ? "FAILED!\n" : "Ok");
1488  }
1489 
1490  return 0;
1491 }
1492 
1493 static int perf_test__list(int argc, const char **argv)
1494 {
1495  int i = 0;
1496 
1497  while (tests[i].func) {
1498  int curr = i++;
1499 
1500  if (argc > 1 && !strstr(tests[curr].desc, argv[1]))
1501  continue;
1502 
1503  pr_info("%2d: %s\n", i, tests[curr].desc);
1504  }
1505 
1506  return 0;
1507 }
1508 
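 /*
  * Example invocations (a sketch, assuming the perf binary built from this
  * tree):
  *
  *   perf test            # run all tests
  *   perf test list       # list the available tests
  *   perf test 1 3        # run tests 1 and 3
  *   perf test vmlinux    # run tests whose description contains "vmlinux"
  */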
1509 int cmd_test(int argc, const char **argv, const char *prefix __maybe_unused)
1510 {
1511  const char * const test_usage[] = {
1512  "perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]",
1513  NULL,
1514  };
1515  const struct option test_options[] = {
1516  OPT_INCR('v', "verbose", &verbose,
1517  "be more verbose (show symbol address, etc)"),
1518  OPT_END()
1519  };
1520 
1521  argc = parse_options(argc, argv, test_options, test_usage, 0);
1522  if (argc >= 1 && !strcmp(argv[0], "list"))
1523  return perf_test__list(argc, argv);
1524 
1525  symbol_conf.priv_size = sizeof(int);
1526  symbol_conf.sort_by_name = true;
1527  symbol_conf.try_vmlinux_path = true;
1528 
1529  if (symbol__init() < 0)
1530  return -1;
1531 
1532  return __cmd_test(argc, argv);
1533 }