Linux Kernel 3.7.1
parse-events.c
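The listing below is perf's event-specification parser: strings such as "cycles:u", "cache-misses", "sched:sched_switch" or "mem:0x1000:rw" are turned into perf_evsel entries on an evlist. As a rough orientation before the listing, here is a minimal, hypothetical sketch of how a tool of this era might drive parse_events(); the perf_evlist__new(NULL, NULL) constructor arguments and the surrounding error handling are assumptions for illustration, not part of this file.

    #include <errno.h>
    #include "util/evlist.h"
    #include "util/parse-events.h"

    /* Hypothetical driver: build an evlist from an event string. */
    static int build_evlist(const char *spec)
    {
        /* assumed 3.7-era constructor: cpu_map and thread_map both NULL */
        struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
        int err;

        if (evlist == NULL)
            return -ENOMEM;

        /* e.g. "cycles:u,instructions" -> two evsels, with modifiers applied */
        err = parse_events(evlist, spec, 0);
        if (err) {
            perf_evlist__delete(evlist);
            return err;
        }

        /* ... open/mmap/enable the events as a builtin would ... */
        perf_evlist__delete(evlist);
        return 0;
    }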
1 #include <linux/hw_breakpoint.h>
2 #include "util.h"
3 #include "../perf.h"
4 #include "evlist.h"
5 #include "evsel.h"
6 #include "parse-options.h"
7 #include "parse-events.h"
8 #include "exec_cmd.h"
9 #include "string.h"
10 #include "symbol.h"
11 #include "cache.h"
12 #include "header.h"
13 #include "debugfs.h"
14 #include "parse-events-bison.h"
15 #define YY_EXTRA_TYPE int
16 #include "parse-events-flex.h"
17 #include "pmu.h"
18 
19 #define MAX_NAME_LEN 100
20 
21 struct event_symbol {
22  const char *symbol;
23  const char *alias;
24 };
25 
26 #ifdef PARSER_DEBUG
27 extern int parse_events_debug;
28 #endif
29 int parse_events_parse(void *data, void *scanner);
30 
31 static struct event_symbol event_symbols_hw[PERF_COUNT_HW_MAX] = {
32  [PERF_COUNT_HW_CPU_CYCLES] = {
33  .symbol = "cpu-cycles",
34  .alias = "cycles",
35  },
36  [PERF_COUNT_HW_INSTRUCTIONS] = {
37  .symbol = "instructions",
38  .alias = "",
39  },
40  [PERF_COUNT_HW_CACHE_REFERENCES] = {
41  .symbol = "cache-references",
42  .alias = "",
43  },
44  [PERF_COUNT_HW_CACHE_MISSES] = {
45  .symbol = "cache-misses",
46  .alias = "",
47  },
48  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = {
49  .symbol = "branch-instructions",
50  .alias = "branches",
51  },
52  [PERF_COUNT_HW_BRANCH_MISSES] = {
53  .symbol = "branch-misses",
54  .alias = "",
55  },
56  [PERF_COUNT_HW_BUS_CYCLES] = {
57  .symbol = "bus-cycles",
58  .alias = "",
59  },
60  [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = {
61  .symbol = "stalled-cycles-frontend",
62  .alias = "idle-cycles-frontend",
63  },
64  [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = {
65  .symbol = "stalled-cycles-backend",
66  .alias = "idle-cycles-backend",
67  },
68  [PERF_COUNT_HW_REF_CPU_CYCLES] = {
69  .symbol = "ref-cycles",
70  .alias = "",
71  },
72 };
73 
74 static struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = {
75  [PERF_COUNT_SW_CPU_CLOCK] = {
76  .symbol = "cpu-clock",
77  .alias = "",
78  },
79  [PERF_COUNT_SW_TASK_CLOCK] = {
80  .symbol = "task-clock",
81  .alias = "",
82  },
83  [PERF_COUNT_SW_PAGE_FAULTS] = {
84  .symbol = "page-faults",
85  .alias = "faults",
86  },
87  [PERF_COUNT_SW_CONTEXT_SWITCHES] = {
88  .symbol = "context-switches",
89  .alias = "cs",
90  },
91  [PERF_COUNT_SW_CPU_MIGRATIONS] = {
92  .symbol = "cpu-migrations",
93  .alias = "migrations",
94  },
95  [PERF_COUNT_SW_PAGE_FAULTS_MIN] = {
96  .symbol = "minor-faults",
97  .alias = "",
98  },
99  [PERF_COUNT_SW_PAGE_FAULTS_MAJ] = {
100  .symbol = "major-faults",
101  .alias = "",
102  },
103  [PERF_COUNT_SW_ALIGNMENT_FAULTS] = {
104  .symbol = "alignment-faults",
105  .alias = "",
106  },
107  [PERF_COUNT_SW_EMULATION_FAULTS] = {
108  .symbol = "emulation-faults",
109  .alias = "",
110  },
111 };
112 
113 #define __PERF_EVENT_FIELD(config, name) \
114  ((config & PERF_EVENT_##name##_MASK) >> PERF_EVENT_##name##_SHIFT)
115 
116 #define PERF_EVENT_RAW(config) __PERF_EVENT_FIELD(config, RAW)
117 #define PERF_EVENT_CONFIG(config) __PERF_EVENT_FIELD(config, CONFIG)
118 #define PERF_EVENT_TYPE(config) __PERF_EVENT_FIELD(config, TYPE)
119 #define PERF_EVENT_ID(config) __PERF_EVENT_FIELD(config, EVENT)
120 
121 #define for_each_subsystem(sys_dir, sys_dirent, sys_next) \
122  while (!readdir_r(sys_dir, &sys_dirent, &sys_next) && sys_next) \
123  if (sys_dirent.d_type == DT_DIR && \
124  (strcmp(sys_dirent.d_name, ".")) && \
125  (strcmp(sys_dirent.d_name, "..")))
126 
127 static int tp_event_has_id(struct dirent *sys_dir, struct dirent *evt_dir)
128 {
129  char evt_path[MAXPATHLEN];
130  int fd;
131 
132  snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", tracing_events_path,
133  sys_dir->d_name, evt_dir->d_name);
134  fd = open(evt_path, O_RDONLY);
135  if (fd < 0)
136  return -EINVAL;
137  close(fd);
138 
139  return 0;
140 }
141 
142 #define for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) \
143  while (!readdir_r(evt_dir, &evt_dirent, &evt_next) && evt_next) \
144  if (evt_dirent.d_type == DT_DIR && \
145  (strcmp(evt_dirent.d_name, ".")) && \
146  (strcmp(evt_dirent.d_name, "..")) && \
147  (!tp_event_has_id(&sys_dirent, &evt_dirent)))
148 
149 #define MAX_EVENT_LENGTH 512
150 
151 
152 struct tracepoint_path *tracepoint_id_to_path(u64 config)
153 {
154  struct tracepoint_path *path = NULL;
155  DIR *sys_dir, *evt_dir;
156  struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
157  char id_buf[24];
158  int fd;
159  u64 id;
160  char evt_path[MAXPATHLEN];
161  char dir_path[MAXPATHLEN];
162 
163  if (debugfs_valid_mountpoint(tracing_events_path))
164  return NULL;
165 
166  sys_dir = opendir(tracing_events_path);
167  if (!sys_dir)
168  return NULL;
169 
170  for_each_subsystem(sys_dir, sys_dirent, sys_next) {
171 
172  snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
173  sys_dirent.d_name);
174  evt_dir = opendir(dir_path);
175  if (!evt_dir)
176  continue;
177 
178  for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
179 
180  snprintf(evt_path, MAXPATHLEN, "%s/%s/id", dir_path,
181  evt_dirent.d_name);
182  fd = open(evt_path, O_RDONLY);
183  if (fd < 0)
184  continue;
185  if (read(fd, id_buf, sizeof(id_buf)) < 0) {
186  close(fd);
187  continue;
188  }
189  close(fd);
190  id = atoll(id_buf);
191  if (id == config) {
192  closedir(evt_dir);
193  closedir(sys_dir);
194  path = zalloc(sizeof(*path));
195  path->system = malloc(MAX_EVENT_LENGTH);
196  if (!path->system) {
197  free(path);
198  return NULL;
199  }
200  path->name = malloc(MAX_EVENT_LENGTH);
201  if (!path->name) {
202  free(path->system);
203  free(path);
204  return NULL;
205  }
206  strncpy(path->system, sys_dirent.d_name,
207  MAX_EVENT_LENGTH);
208  strncpy(path->name, evt_dirent.d_name,
209  MAX_EVENT_LENGTH);
210  return path;
211  }
212  }
213  closedir(evt_dir);
214  }
215 
216  closedir(sys_dir);
217  return NULL;
218 }
219 
220 const char *event_type(int type)
221 {
222  switch (type) {
223  case PERF_TYPE_HARDWARE:
224  return "hardware";
225 
226  case PERF_TYPE_SOFTWARE:
227  return "software";
228 
229  case PERF_TYPE_TRACEPOINT:
230  return "tracepoint";
231 
232  case PERF_TYPE_HW_CACHE:
233  return "hardware-cache";
234 
235  default:
236  break;
237  }
238 
239  return "unknown";
240 }
241 
242 
243 
244 static int __add_event(struct list_head **_list, int *idx,
245  struct perf_event_attr *attr,
246  char *name, struct cpu_map *cpus)
247 {
248  struct perf_evsel *evsel;
249  struct list_head *list = *_list;
250 
251  if (!list) {
252  list = malloc(sizeof(*list));
253  if (!list)
254  return -ENOMEM;
255  INIT_LIST_HEAD(list);
256  }
257 
258  event_attr_init(attr);
259 
260  evsel = perf_evsel__new(attr, (*idx)++);
261  if (!evsel) {
262  free(list);
263  return -ENOMEM;
264  }
265 
266  evsel->cpus = cpus;
267  if (name)
268  evsel->name = strdup(name);
269  list_add_tail(&evsel->node, list);
270  *_list = list;
271  return 0;
272 }
273 
274 static int add_event(struct list_head **_list, int *idx,
275  struct perf_event_attr *attr, char *name)
276 {
277  return __add_event(_list, idx, attr, name, NULL);
278 }
279 
280 static int parse_aliases(char *str, const char *names[][PERF_EVSEL__MAX_ALIASES], int size)
281 {
282  int i, j;
283  int n, longest = -1;
284 
285  for (i = 0; i < size; i++) {
286  for (j = 0; j < PERF_EVSEL__MAX_ALIASES && names[i][j]; j++) {
287  n = strlen(names[i][j]);
288  if (n > longest && !strncasecmp(str, names[i][j], n))
289  longest = n;
290  }
291  if (longest > 0)
292  return i;
293  }
294 
295  return -1;
296 }
297 
298 int parse_events_add_cache(struct list_head **list, int *idx,
299  char *type, char *op_result1, char *op_result2)
300 {
301  struct perf_event_attr attr;
302  char name[MAX_NAME_LEN];
303  int cache_type = -1, cache_op = -1, cache_result = -1;
304  char *op_result[2] = { op_result1, op_result2 };
305  int i, n;
306 
307  /*
308  * No fallback - if we cannot get a clear cache type
309  * then bail out:
310  */
311  cache_type = parse_aliases(type, perf_evsel__hw_cache,
312  PERF_COUNT_HW_CACHE_MAX);
313  if (cache_type == -1)
314  return -EINVAL;
315 
316  n = snprintf(name, MAX_NAME_LEN, "%s", type);
317 
318  for (i = 0; (i < 2) && (op_result[i]); i++) {
319  char *str = op_result[i];
320 
321  n += snprintf(name + n, MAX_NAME_LEN - n, "-%s", str);
322 
323  if (cache_op == -1) {
324  cache_op = parse_aliases(str, perf_evsel__hw_cache_op,
325  PERF_COUNT_HW_CACHE_OP_MAX);
326  if (cache_op >= 0) {
327  if (!perf_evsel__is_cache_op_valid(cache_type, cache_op))
328  return -EINVAL;
329  continue;
330  }
331  }
332 
333  if (cache_result == -1) {
334  cache_result = parse_aliases(str, perf_evsel__hw_cache_result,
335  PERF_COUNT_HW_CACHE_RESULT_MAX);
336  if (cache_result >= 0)
337  continue;
338  }
339  }
340 
341  /*
342  * Fall back to reads:
343  */
344  if (cache_op == -1)
345  cache_op = PERF_COUNT_HW_CACHE_OP_READ;
346 
347  /*
348  * Fall back to accesses:
349  */
350  if (cache_result == -1)
351  cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS;
352 
353  memset(&attr, 0, sizeof(attr));
354  attr.config = cache_type | (cache_op << 8) | (cache_result << 16);
355  attr.type = PERF_TYPE_HW_CACHE;
356  return add_event(list, idx, &attr, name);
357 }
358 
359 static int add_tracepoint(struct list_head **listp, int *idx,
360  char *sys_name, char *evt_name)
361 {
362  struct perf_evsel *evsel;
363  struct list_head *list = *listp;
364 
365  if (!list) {
366  list = malloc(sizeof(*list));
367  if (!list)
368  return -ENOMEM;
369  INIT_LIST_HEAD(list);
370  }
371 
372  evsel = perf_evsel__newtp(sys_name, evt_name, (*idx)++);
373  if (!evsel) {
374  free(list);
375  return -ENOMEM;
376  }
377 
378  list_add_tail(&evsel->node, list);
379  *listp = list;
380  return 0;
381 }
382 
383 static int add_tracepoint_multi(struct list_head **list, int *idx,
384  char *sys_name, char *evt_name)
385 {
386  char evt_path[MAXPATHLEN];
387  struct dirent *evt_ent;
388  DIR *evt_dir;
389  int ret = 0;
390 
391  snprintf(evt_path, MAXPATHLEN, "%s/%s", tracing_events_path, sys_name);
392  evt_dir = opendir(evt_path);
393  if (!evt_dir) {
394  perror("Can't open event dir");
395  return -1;
396  }
397 
398  while (!ret && (evt_ent = readdir(evt_dir))) {
399  if (!strcmp(evt_ent->d_name, ".")
400  || !strcmp(evt_ent->d_name, "..")
401  || !strcmp(evt_ent->d_name, "enable")
402  || !strcmp(evt_ent->d_name, "filter"))
403  continue;
404 
405  if (!strglobmatch(evt_ent->d_name, evt_name))
406  continue;
407 
408  ret = add_tracepoint(list, idx, sys_name, evt_ent->d_name);
409  }
410 
411  return ret;
412 }
413 
414 int parse_events_add_tracepoint(struct list_head **list, int *idx,
415  char *sys, char *event)
416 {
417  int ret;
418 
419  ret = debugfs_valid_mountpoint(tracing_events_path);
420  if (ret)
421  return ret;
422 
423  return strpbrk(event, "*?") ?
424  add_tracepoint_multi(list, idx, sys, event) :
425  add_tracepoint(list, idx, sys, event);
426 }
427 
428 static int
429 parse_breakpoint_type(const char *type, struct perf_event_attr *attr)
430 {
431  int i;
432 
433  for (i = 0; i < 3; i++) {
434  if (!type || !type[i])
435  break;
436 
437 #define CHECK_SET_TYPE(bit) \
438 do { \
439  if (attr->bp_type & bit) \
440  return -EINVAL; \
441  else \
442  attr->bp_type |= bit; \
443 } while (0)
444 
445  switch (type[i]) {
446  case 'r':
447  CHECK_SET_TYPE(HW_BREAKPOINT_R);
448  break;
449  case 'w':
450  CHECK_SET_TYPE(HW_BREAKPOINT_W);
451  break;
452  case 'x':
453  CHECK_SET_TYPE(HW_BREAKPOINT_X);
454  break;
455  default:
456  return -EINVAL;
457  }
458  }
459 
460 #undef CHECK_SET_TYPE
461 
462  if (!attr->bp_type) /* Default */
463  attr->bp_type = HW_BREAKPOINT_RW;
464 
465  return 0;
466 }
467 
468 int parse_events_add_breakpoint(struct list_head **list, int *idx,
469  void *ptr, char *type)
470 {
471  struct perf_event_attr attr;
472 
473  memset(&attr, 0, sizeof(attr));
474  attr.bp_addr = (unsigned long) ptr;
475 
476  if (parse_breakpoint_type(type, &attr))
477  return -EINVAL;
478 
479  /*
480  * We should find a nice way to override the access length
481  * Provide some defaults for now
482  */
483  if (attr.bp_type == HW_BREAKPOINT_X)
484  attr.bp_len = sizeof(long);
485  else
486  attr.bp_len = HW_BREAKPOINT_LEN_4;
487 
488  attr.type = PERF_TYPE_BREAKPOINT;
489  attr.sample_period = 1;
490 
491  return add_event(list, idx, &attr, NULL);
492 }
493 
494 static int config_term(struct perf_event_attr *attr,
495  struct parse_events__term *term)
496 {
497 #define CHECK_TYPE_VAL(type) \
498 do { \
499  if (PARSE_EVENTS__TERM_TYPE_ ## type != term->type_val) \
500  return -EINVAL; \
501 } while (0)
502 
503  switch (term->type_term) {
504  case PARSE_EVENTS__TERM_TYPE_CONFIG:
505  CHECK_TYPE_VAL(NUM);
506  attr->config = term->val.num;
507  break;
508  case PARSE_EVENTS__TERM_TYPE_CONFIG1:
509  CHECK_TYPE_VAL(NUM);
510  attr->config1 = term->val.num;
511  break;
512  case PARSE_EVENTS__TERM_TYPE_CONFIG2:
513  CHECK_TYPE_VAL(NUM);
514  attr->config2 = term->val.num;
515  break;
516  case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
517  CHECK_TYPE_VAL(NUM);
518  attr->sample_period = term->val.num;
519  break;
520  case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
521  /*
522  * TODO uncomment when the field is available
523  * attr->branch_sample_type = term->val.num;
524  */
525  break;
526  case PARSE_EVENTS__TERM_TYPE_NAME:
527  CHECK_TYPE_VAL(STR);
528  break;
529  default:
530  return -EINVAL;
531  }
532 
533  return 0;
534 #undef CHECK_TYPE_VAL
535 }
536 
537 static int config_attr(struct perf_event_attr *attr,
538  struct list_head *head, int fail)
539 {
540  struct parse_events__term *term;
541 
542  list_for_each_entry(term, head, list)
543  if (config_term(attr, term) && fail)
544  return -EINVAL;
545 
546  return 0;
547 }
548 
549 int parse_events_add_numeric(struct list_head **list, int *idx,
550  u32 type, u64 config,
551  struct list_head *head_config)
552 {
553  struct perf_event_attr attr;
554 
555  memset(&attr, 0, sizeof(attr));
556  attr.type = type;
557  attr.config = config;
558 
559  if (head_config &&
560  config_attr(&attr, head_config, 1))
561  return -EINVAL;
562 
563  return add_event(list, idx, &attr, NULL);
564 }
565 
566 static int parse_events__is_name_term(struct parse_events__term *term)
567 {
568  return term->type_term == PARSE_EVENTS__TERM_TYPE_NAME;
569 }
570 
571 static char *pmu_event_name(struct list_head *head_terms)
572 {
573  struct parse_events__term *term;
574 
575  list_for_each_entry(term, head_terms, list)
576  if (parse_events__is_name_term(term))
577  return term->val.str;
578 
579  return NULL;
580 }
581 
582 int parse_events_add_pmu(struct list_head **list, int *idx,
583  char *name, struct list_head *head_config)
584 {
585  struct perf_event_attr attr;
586  struct perf_pmu *pmu;
587 
588  pmu = perf_pmu__find(name);
589  if (!pmu)
590  return -EINVAL;
591 
592  memset(&attr, 0, sizeof(attr));
593 
594  if (perf_pmu__check_alias(pmu, head_config))
595  return -EINVAL;
596 
597  /*
598  * Configure hardcoded terms first, no need to check
599  * return value when called with fail == 0 ;)
600  */
601  config_attr(&attr, head_config, 0);
602 
603  if (perf_pmu__config(pmu, &attr, head_config))
604  return -EINVAL;
605 
606  return __add_event(list, idx, &attr, pmu_event_name(head_config),
607  pmu->cpus);
608 }
609 
610 int parse_events__modifier_group(struct list_head *list,
611  char *event_mod)
612 {
613  return parse_events__modifier_event(list, event_mod, true);
614 }
615 
616 void parse_events__set_leader(char *name, struct list_head *list)
617 {
618  struct perf_evsel *leader;
619 
620  __perf_evlist__set_leader(list);
621  leader = list_entry(list->next, struct perf_evsel, node);
622  leader->group_name = name ? strdup(name) : NULL;
623 }
624 
625 void parse_events_update_lists(struct list_head *list_event,
626  struct list_head *list_all)
627 {
628  /*
629  * Called for single event definition. Update the
630  * 'all event' list, and reinit the 'single event'
631  * list, for next event definition.
632  */
633  list_splice_tail(list_event, list_all);
634  free(list_event);
635 }
636 
637 struct event_modifier {
638  int eu;
639  int ek;
640  int eh;
641  int eH;
642  int eG;
643  int precise;
644  int exclude_GH;
645 };
646 
647 static int get_event_modifier(struct event_modifier *mod, char *str,
648  struct perf_evsel *evsel)
649 {
650  int eu = evsel ? evsel->attr.exclude_user : 0;
651  int ek = evsel ? evsel->attr.exclude_kernel : 0;
652  int eh = evsel ? evsel->attr.exclude_hv : 0;
653  int eH = evsel ? evsel->attr.exclude_host : 0;
654  int eG = evsel ? evsel->attr.exclude_guest : 0;
655  int precise = evsel ? evsel->attr.precise_ip : 0;
656 
657  int exclude = eu | ek | eh;
658  int exclude_GH = evsel ? evsel->exclude_GH : 0;
659 
660  /*
661  * We are here for group and 'GH' was not set as event
662  * modifier and whatever event/group modifier override
663  * default 'GH' setup.
664  */
665  if (evsel && !exclude_GH)
666  eH = eG = 0;
667 
668  memset(mod, 0, sizeof(*mod));
669 
670  while (*str) {
671  if (*str == 'u') {
672  if (!exclude)
673  exclude = eu = ek = eh = 1;
674  eu = 0;
675  } else if (*str == 'k') {
676  if (!exclude)
677  exclude = eu = ek = eh = 1;
678  ek = 0;
679  } else if (*str == 'h') {
680  if (!exclude)
681  exclude = eu = ek = eh = 1;
682  eh = 0;
683  } else if (*str == 'G') {
684  if (!exclude_GH)
685  exclude_GH = eG = eH = 1;
686  eG = 0;
687  } else if (*str == 'H') {
688  if (!exclude_GH)
689  exclude_GH = eG = eH = 1;
690  eH = 0;
691  } else if (*str == 'p') {
692  precise++;
693  /* use of precise requires exclude_guest */
694  if (!exclude_GH)
695  eG = 1;
696  } else
697  break;
698 
699  ++str;
700  }
701 
702  /*
703  * precise ip:
704  *
705  * 0 - SAMPLE_IP can have arbitrary skid
706  * 1 - SAMPLE_IP must have constant skid
707  * 2 - SAMPLE_IP requested to have 0 skid
708  * 3 - SAMPLE_IP must have 0 skid
709  *
710  * See also PERF_RECORD_MISC_EXACT_IP
711  */
712  if (precise > 3)
713  return -EINVAL;
714 
715  mod->eu = eu;
716  mod->ek = ek;
717  mod->eh = eh;
718  mod->eH = eH;
719  mod->eG = eG;
720  mod->precise = precise;
721  mod->exclude_GH = exclude_GH;
722  return 0;
723 }
724 
725 int parse_events__modifier_event(struct list_head *list, char *str, bool add)
726 {
727  struct perf_evsel *evsel;
728  struct event_modifier mod;
729 
730  if (str == NULL)
731  return 0;
732 
733  if (!add && get_event_modifier(&mod, str, NULL))
734  return -EINVAL;
735 
736  list_for_each_entry(evsel, list, node) {
737 
738  if (add && get_event_modifier(&mod, str, evsel))
739  return -EINVAL;
740 
741  evsel->attr.exclude_user = mod.eu;
742  evsel->attr.exclude_kernel = mod.ek;
743  evsel->attr.exclude_hv = mod.eh;
744  evsel->attr.precise_ip = mod.precise;
745  evsel->attr.exclude_host = mod.eH;
746  evsel->attr.exclude_guest = mod.eG;
747  evsel->exclude_GH = mod.exclude_GH;
748  }
749 
750  return 0;
751 }
752 
753 int parse_events_name(struct list_head *list, char *name)
754 {
755  struct perf_evsel *evsel;
756 
757  list_for_each_entry(evsel, list, node) {
758  if (!evsel->name)
759  evsel->name = strdup(name);
760  }
761 
762  return 0;
763 }
764 
765 static int parse_events__scanner(const char *str, void *data, int start_token)
766 {
767  YY_BUFFER_STATE buffer;
768  void *scanner;
769  int ret;
770 
771  ret = parse_events_lex_init_extra(start_token, &scanner);
772  if (ret)
773  return ret;
774 
775  buffer = parse_events__scan_string(str, scanner);
776 
777 #ifdef PARSER_DEBUG
778  parse_events_debug = 1;
779 #endif
780  ret = parse_events_parse(data, scanner);
781 
782  parse_events__flush_buffer(buffer, scanner);
783  parse_events__delete_buffer(buffer, scanner);
784  parse_events_lex_destroy(scanner);
785  return ret;
786 }
787 
788 /*
789  * parse event config string, return a list of event terms.
790  */
791 int parse_events_terms(struct list_head *terms, const char *str)
792 {
793  struct parse_events_data__terms data = {
794  .terms = NULL,
795  };
796  int ret;
797 
798  ret = parse_events__scanner(str, &data, PE_START_TERMS);
799  if (!ret) {
800  list_splice(data.terms, terms);
801  free(data.terms);
802  return 0;
803  }
804 
805  parse_events__free_terms(data.terms);
806  return ret;
807 }
808 
809 int parse_events(struct perf_evlist *evlist, const char *str,
810  int unset __maybe_unused)
811 {
812  struct parse_events_data__events data = {
813  .list = LIST_HEAD_INIT(data.list),
814  .idx = evlist->nr_entries,
815  };
816  int ret;
817 
818  ret = parse_events__scanner(str, &data, PE_START_EVENTS);
819  if (!ret) {
820  int entries = data.idx - evlist->nr_entries;
821  perf_evlist__splice_list_tail(evlist, &data.list, entries);
822  return 0;
823  }
824 
825  /*
826  * There are 2 users - builtin-record and builtin-test objects.
827  * Both call perf_evlist__delete in case of error, so we dont
828  * need to bother.
829  */
830  fprintf(stderr, "invalid or unsupported event: '%s'\n", str);
831  fprintf(stderr, "Run 'perf list' for a list of valid events\n");
832  return ret;
833 }
834 
835 int parse_events_option(const struct option *opt, const char *str,
836  int unset __maybe_unused)
837 {
838  struct perf_evlist *evlist = *(struct perf_evlist **)opt->value;
839  return parse_events(evlist, str, unset);
840 }
841 
842 int parse_filter(const struct option *opt, const char *str,
843  int unset __maybe_unused)
844 {
845  struct perf_evlist *evlist = *(struct perf_evlist **)opt->value;
846  struct perf_evsel *last = NULL;
847 
848  if (evlist->nr_entries > 0)
849  last = perf_evlist__last(evlist);
850 
851  if (last == NULL || last->attr.type != PERF_TYPE_TRACEPOINT) {
852  fprintf(stderr,
853  "-F option should follow a -e tracepoint option\n");
854  return -1;
855  }
856 
857  last->filter = strdup(str);
858  if (last->filter == NULL) {
859  fprintf(stderr, "not enough memory to hold filter string\n");
860  return -1;
861  }
862 
863  return 0;
864 }
865 
866 static const char * const event_type_descriptors[] = {
867  "Hardware event",
868  "Software event",
869  "Tracepoint event",
870  "Hardware cache event",
871  "Raw hardware event descriptor",
872  "Hardware breakpoint",
873 };
874 
875 /*
876  * Print the events from <debugfs_mount_point>/tracing/events
877  */
878 
879 void print_tracepoint_events(const char *subsys_glob, const char *event_glob,
880  bool name_only)
881 {
882  DIR *sys_dir, *evt_dir;
883  struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
884  char evt_path[MAXPATHLEN];
885  char dir_path[MAXPATHLEN];
886 
887  if (debugfs_valid_mountpoint(tracing_events_path))
888  return;
889 
890  sys_dir = opendir(tracing_events_path);
891  if (!sys_dir)
892  return;
893 
894  for_each_subsystem(sys_dir, sys_dirent, sys_next) {
895  if (subsys_glob != NULL &&
896  !strglobmatch(sys_dirent.d_name, subsys_glob))
897  continue;
898 
899  snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
900  sys_dirent.d_name);
901  evt_dir = opendir(dir_path);
902  if (!evt_dir)
903  continue;
904 
905  for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
906  if (event_glob != NULL &&
907  !strglobmatch(evt_dirent.d_name, event_glob))
908  continue;
909 
910  if (name_only) {
911  printf("%s:%s ", sys_dirent.d_name, evt_dirent.d_name);
912  continue;
913  }
914 
915  snprintf(evt_path, MAXPATHLEN, "%s:%s",
916  sys_dirent.d_name, evt_dirent.d_name);
917  printf(" %-50s [%s]\n", evt_path,
918  event_type_descriptors[PERF_TYPE_TRACEPOINT]);
919  }
920  closedir(evt_dir);
921  }
922  closedir(sys_dir);
923 }
924 
925 /*
926  * Check whether event is in <debugfs_mount_point>/tracing/events
927  */
928 
929 int is_valid_tracepoint(const char *event_string)
930 {
931  DIR *sys_dir, *evt_dir;
932  struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
933  char evt_path[MAXPATHLEN];
934  char dir_path[MAXPATHLEN];
935 
936  if (debugfs_valid_mountpoint(tracing_events_path))
937  return 0;
938 
939  sys_dir = opendir(tracing_events_path);
940  if (!sys_dir)
941  return 0;
942 
943  for_each_subsystem(sys_dir, sys_dirent, sys_next) {
944 
945  snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
946  sys_dirent.d_name);
947  evt_dir = opendir(dir_path);
948  if (!evt_dir)
949  continue;
950 
951  for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
952  snprintf(evt_path, MAXPATHLEN, "%s:%s",
953  sys_dirent.d_name, evt_dirent.d_name);
954  if (!strcmp(evt_path, event_string)) {
955  closedir(evt_dir);
956  closedir(sys_dir);
957  return 1;
958  }
959  }
960  closedir(evt_dir);
961  }
962  closedir(sys_dir);
963  return 0;
964 }
965 
966 static void __print_events_type(u8 type, struct event_symbol *syms,
967  unsigned max)
968 {
969  char name[64];
970  unsigned i;
971 
972  for (i = 0; i < max ; i++, syms++) {
973  if (strlen(syms->alias))
974  snprintf(name, sizeof(name), "%s OR %s",
975  syms->symbol, syms->alias);
976  else
977  snprintf(name, sizeof(name), "%s", syms->symbol);
978 
979  printf(" %-50s [%s]\n", name,
980  event_type_descriptors[type]);
981  }
982 }
983 
984 void print_events_type(u8 type)
985 {
986  if (type == PERF_TYPE_SOFTWARE)
987  __print_events_type(type, event_symbols_sw, PERF_COUNT_SW_MAX);
988  else
989  __print_events_type(type, event_symbols_hw, PERF_COUNT_HW_MAX);
990 }
991 
992 int print_hwcache_events(const char *event_glob, bool name_only)
993 {
994  unsigned int type, op, i, printed = 0;
995  char name[64];
996 
997  for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
998  for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
999  /* skip invalid cache type */
1000  if (!perf_evsel__is_cache_op_valid(type, op))
1001  continue;
1002 
1003  for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
1004  __perf_evsel__hw_cache_type_op_res_name(type, op, i,
1005  name, sizeof(name));
1006  if (event_glob != NULL && !strglobmatch(name, event_glob))
1007  continue;
1008 
1009  if (name_only)
1010  printf("%s ", name);
1011  else
1012  printf(" %-50s [%s]\n", name,
1013  event_type_descriptors[PERF_TYPE_HW_CACHE]);
1014  ++printed;
1015  }
1016  }
1017  }
1018 
1019  return printed;
1020 }
1021 
1022 static void print_symbol_events(const char *event_glob, unsigned type,
1023  struct event_symbol *syms, unsigned max,
1024  bool name_only)
1025 {
1026  unsigned i, printed = 0;
1027  char name[MAX_NAME_LEN];
1028 
1029  for (i = 0; i < max; i++, syms++) {
1030 
1031  if (event_glob != NULL &&
1032  !(strglobmatch(syms->symbol, event_glob) ||
1033  (syms->alias && strglobmatch(syms->alias, event_glob))))
1034  continue;
1035 
1036  if (name_only) {
1037  printf("%s ", syms->symbol);
1038  continue;
1039  }
1040 
1041  if (strlen(syms->alias))
1042  snprintf(name, MAX_NAME_LEN, "%s OR %s", syms->symbol, syms->alias);
1043  else
1044  strncpy(name, syms->symbol, MAX_NAME_LEN);
1045 
1046  printf(" %-50s [%s]\n", name, event_type_descriptors[type]);
1047 
1048  printed++;
1049  }
1050 
1051  if (printed)
1052  printf("\n");
1053 }
1054 
1055 /*
1056  * Print the help text for the event symbols:
1057  */
1058 void print_events(const char *event_glob, bool name_only)
1059 {
1060  if (!name_only) {
1061  printf("\n");
1062  printf("List of pre-defined events (to be used in -e):\n");
1063  }
1064 
1065  print_symbol_events(event_glob, PERF_TYPE_HARDWARE,
1066  event_symbols_hw, PERF_COUNT_HW_MAX, name_only);
1067 
1068  print_symbol_events(event_glob, PERF_TYPE_SOFTWARE,
1069  event_symbols_sw, PERF_COUNT_SW_MAX, name_only);
1070 
1071  print_hwcache_events(event_glob, name_only);
1072 
1073  if (event_glob != NULL)
1074  return;
1075 
1076  if (!name_only) {
1077  printf("\n");
1078  printf(" %-50s [%s]\n",
1079  "rNNN",
1080  event_type_descriptors[PERF_TYPE_RAW]);
1081  printf(" %-50s [%s]\n",
1082  "cpu/t1=v1[,t2=v2,t3 ...]/modifier",
1083  event_type_descriptors[PERF_TYPE_RAW]);
1084  printf(" (see 'perf list --help' on how to encode it)\n");
1085  printf("\n");
1086 
1087  printf(" %-50s [%s]\n",
1088  "mem:<addr>[:access]",
1089  event_type_descriptors[PERF_TYPE_BREAKPOINT]);
1090  printf("\n");
1091  }
1092 
1093  print_tracepoint_events(NULL, NULL, name_only);
1094 }
1095 
1096 int parse_events__is_hardcoded_term(struct parse_events__term *term)
1097 {
1098  return term->type_term != PARSE_EVENTS__TERM_TYPE_USER;
1099 }
1100 
1101 static int new_term(struct parse_events__term **_term, int type_val,
1102  int type_term, char *config,
1103  char *str, u64 num)
1104 {
1105  struct parse_events__term *term;
1106 
1107  term = zalloc(sizeof(*term));
1108  if (!term)
1109  return -ENOMEM;
1110 
1111  INIT_LIST_HEAD(&term->list);
1112  term->type_val = type_val;
1113  term->type_term = type_term;
1114  term->config = config;
1115 
1116  switch (type_val) {
1117  case PARSE_EVENTS__TERM_TYPE_NUM:
1118  term->val.num = num;
1119  break;
1120  case PARSE_EVENTS__TERM_TYPE_STR:
1121  term->val.str = str;
1122  break;
1123  default:
1124  return -EINVAL;
1125  }
1126 
1127  *_term = term;
1128  return 0;
1129 }
1130 
1131 int parse_events__term_num(struct parse_events__term **term,
1132  int type_term, char *config, u64 num)
1133 {
1134  return new_term(term, PARSE_EVENTS__TERM_TYPE_NUM, type_term,
1135  config, NULL, num);
1136 }
1137 
1138 int parse_events__term_str(struct parse_events__term **term,
1139  int type_term, char *config, char *str)
1140 {
1141  return new_term(term, PARSE_EVENTS__TERM_TYPE_STR, type_term,
1142  config, str, 0);
1143 }
1144 
1145 int parse_events__term_clone(struct parse_events__term **new,
1146  struct parse_events__term *term)
1147 {
1148  return new_term(new, term->type_val, term->type_term, term->config,
1149  term->val.str, term->val.num);
1150 }
1151 
1152 void parse_events__free_terms(struct list_head *terms)
1153 {
1154  struct parse_events__term *term, *h;
1155 
1156  list_for_each_entry_safe(term, h, terms, list)
1157  free(term);
1158 
1159  free(terms);
1160 }