hist.c (Linux Kernel 3.7.1, perf tools)
#include "annotate.h"
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
                                          struct hist_entry *he);

enum hist_filter {
        HIST_FILTER__DSO,
        HIST_FILTER__THREAD,
        HIST_FILTER__PARENT,
        HIST_FILTER__SYMBOL,
};

struct callchain_param callchain_param = {
        .mode        = CHAIN_GRAPH_REL,
        .min_percent = 0.5,
        .order       = ORDER_CALLEE
};

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
        return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        if (len > hists__col_len(hists, col)) {
                hists__set_col_len(hists, col, len);
                return true;
        }
        return false;
}

void hists__reset_col_len(struct hists *hists)
{
        enum hist_column col;

        for (col = 0; col < HISTC_NR_COLS; ++col)
                hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

        if (hists__col_len(hists, dso) < unresolved_col_width &&
            !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
            !symbol_conf.dso_list)
                hists__set_col_len(hists, dso, unresolved_col_width);
}

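/*
 * Grow the column widths tracked in 'hists' so that every field of this
 * entry (symbol, comm, dso and, for branch samples, the from/to sides)
 * still fits when the histogram is printed.
 */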
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
        u16 len;

        if (h->ms.sym)
                hists__new_col_len(hists, HISTC_SYMBOL, h->ms.sym->namelen + 4);
        else
                hists__set_unres_dso_col_len(hists, HISTC_DSO);

        len = thread__comm_len(h->thread);
        if (hists__new_col_len(hists, HISTC_COMM, len))
                hists__set_col_len(hists, HISTC_THREAD, len + 6);

        if (h->ms.map) {
                len = dso__name_len(h->ms.map->dso);
                hists__new_col_len(hists, HISTC_DSO, len);
        }

        if (h->branch_info) {
                int symlen;
                /*
                 * +4 accounts for the '[x] ' priv level info
                 * +2 accounts for the 0x prefix on raw addresses
                 */
                if (h->branch_info->from.sym) {
                        symlen = (int)h->branch_info->from.sym->namelen + 4;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

                        symlen = dso__name_len(h->branch_info->from.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
                }

                if (h->branch_info->to.sym) {
                        symlen = (int)h->branch_info->to.sym->namelen + 4;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

                        symlen = dso__name_len(h->branch_info->to.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_TO, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
                }
        }
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
        struct rb_node *next = rb_first(&hists->entries);
        struct hist_entry *n;
        int row = 0;

        hists__reset_col_len(hists);

        while (next && row++ < max_rows) {
                n = rb_entry(next, struct hist_entry, rb_node);
                if (!n->filtered)
                        hists__calc_col_len(hists, n);
                next = rb_next(&n->rb_node);
        }
}

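/*
 * Account 'period' to the cpumode bucket this sample hit in: kernel, user,
 * guest kernel or guest user.
 */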
static void hist_entry__add_cpumode_period(struct hist_entry *he,
                                           unsigned int cpumode, u64 period)
{
        switch (cpumode) {
        case PERF_RECORD_MISC_KERNEL:
                he->stat.period_sys += period;
                break;
        case PERF_RECORD_MISC_USER:
                he->stat.period_us += period;
                break;
        case PERF_RECORD_MISC_GUEST_KERNEL:
                he->stat.period_guest_sys += period;
                break;
        case PERF_RECORD_MISC_GUEST_USER:
                he->stat.period_guest_us += period;
                break;
        default:
                break;
        }
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period)
{
        he_stat->period += period;
        he_stat->nr_events += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
        dest->period += src->period;
        dest->period_sys += src->period_sys;
        dest->period_us += src->period_us;
        dest->period_guest_sys += src->period_guest_sys;
        dest->period_guest_us += src->period_guest_us;
        dest->nr_events += src->nr_events;
}

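/*
 * Aging of entries: each decay pass keeps 7/8 of an entry's period and
 * event count, so entries that stop receiving samples gradually fade out
 * of a live display (e.g. perf top) instead of accumulating forever.
 */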
static void hist_entry__decay(struct hist_entry *he)
{
        he->stat.period = (he->stat.period * 7) / 8;
        he->stat.nr_events = (he->stat.nr_events * 7) / 8;
}

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
        u64 prev_period = he->stat.period;

        if (prev_period == 0)
                return true;

        hist_entry__decay(he);

        if (!he->filtered)
                hists->stats.total_period -= prev_period - he->stat.period;

        return he->stat.period == 0;
}

static void __hists__decay_entries(struct hists *hists, bool zap_user,
                                   bool zap_kernel, bool threaded)
{
        struct rb_node *next = rb_first(&hists->entries);
        struct hist_entry *n;

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                next = rb_next(&n->rb_node);
                /*
                 * We may be annotating this entry, for instance, so keep
                 * it here in case it gets new samples; we'll eventually
                 * free it when the user stops browsing and it again gets
                 * fully decayed.
                 */
                if (((zap_user && n->level == '.') ||
                     (zap_kernel && n->level != '.') ||
                     hists__decay_entry(hists, n)) &&
                    !n->used) {
                        rb_erase(&n->rb_node, &hists->entries);

                        if (sort__need_collapse || threaded)
                                rb_erase(&n->rb_node_in, &hists->entries_collapsed);

                        hist_entry__free(n);
                        --hists->nr_entries;
                }
        }
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
        return __hists__decay_entries(hists, zap_user, zap_kernel, false);
}

void hists__decay_entries_threaded(struct hists *hists,
                                   bool zap_user, bool zap_kernel)
{
        return __hists__decay_entries(hists, zap_user, zap_kernel, true);
}

/*
 * histogram, sorted on item, collects periods
 */

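/*
 * Allocate a hist_entry as a copy of 'template'.  When callchains are
 * enabled, space for the callchain_root is allocated in the same chunk,
 * right after the entry, and initialized through he->callchain.
 */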
static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
        size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
        struct hist_entry *he = malloc(sizeof(*he) + callchain_size);

        if (he != NULL) {
                *he = *template;

                if (he->ms.map)
                        he->ms.map->referenced = true;
                if (symbol_conf.use_callchain)
                        callchain_init(he->callchain);
        }

        return he;
}

static void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
{
        if (!h->filtered) {
                hists__calc_col_len(hists, h);
                ++hists->nr_entries;
                hists->stats.total_period += h->stat.period;
        }
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
        if (symbol_conf.exclude_other && parent == NULL)
                return 1 << HIST_FILTER__PARENT;
        return 0;
}

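/*
 * Insert 'entry' into the current input tree (hists->entries_in), under
 * hists->lock.  If an equal entry is already present, only its period is
 * bumped, and a stale map pointer is refreshed instead of duplicating the
 * entry.
 */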
static struct hist_entry *add_hist_entry(struct hists *hists,
                                         struct hist_entry *entry,
                                         struct addr_location *al,
                                         u64 period)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        int cmp;

        pthread_mutex_lock(&hists->lock);

        p = &hists->entries_in->rb_node;

        while (*p != NULL) {
                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = hist_entry__cmp(entry, he);

                if (!cmp) {
                        he_stat__add_period(&he->stat, period);

                        /* If the map of an existing hist_entry has
                         * become out-of-date due to an exec() or
                         * similar, update it.  Otherwise we will
                         * mis-adjust symbol addresses when computing
                         * the history counter to increment.
                         */
                        if (he->ms.map != entry->ms.map) {
                                he->ms.map = entry->ms.map;
                                if (he->ms.map)
                                        he->ms.map->referenced = true;
                        }
                        goto out;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        he = hist_entry__new(entry);
        if (!he)
                goto out_unlock;

        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
        hist_entry__add_cpumode_period(he, al->cpumode, period);
out_unlock:
        pthread_mutex_unlock(&hists->lock);
        return he;
}

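/*
 * Public entry points: both build a hist_entry template on the stack and
 * hand it to add_hist_entry().  The branch variant keys the entry on the
 * branch target (bi->to), the plain variant on the sampled address.
 */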
struct hist_entry *__hists__add_branch_entry(struct hists *self,
                                             struct addr_location *al,
                                             struct symbol *sym_parent,
                                             struct branch_info *bi,
                                             u64 period)
{
        struct hist_entry entry = {
                .thread = al->thread,
                .ms = {
                        .map    = bi->to.map,
                        .sym    = bi->to.sym,
                },
                .cpu    = al->cpu,
                .ip     = bi->to.addr,
                .level  = al->level,
                .stat = {
                        .period    = period,
                        .nr_events = 1,
                },
                .parent = sym_parent,
                .filtered = symbol__parent_filter(sym_parent),
                .branch_info = bi,
                .hists  = self,
        };

        return add_hist_entry(self, &entry, al, period);
}

struct hist_entry *__hists__add_entry(struct hists *self,
                                      struct addr_location *al,
                                      struct symbol *sym_parent, u64 period)
{
        struct hist_entry entry = {
                .thread = al->thread,
                .ms = {
                        .map    = al->map,
                        .sym    = al->sym,
                },
                .cpu    = al->cpu,
                .ip     = al->addr,
                .level  = al->level,
                .stat = {
                        .period    = period,
                        .nr_events = 1,
                },
                .parent = sym_parent,
                .filtered = symbol__parent_filter(sym_parent),
                .hists  = self,
        };

        return add_hist_entry(self, &entry, al, period);
}

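/*
 * Compare two entries field by field, in the order given by
 * hist_entry__sort_list; the first non-zero comparison wins.  The collapse
 * variant prefers a sort entry's se_collapse method when one is provided.
 */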
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
        struct sort_entry *se;
        int64_t cmp = 0;

        list_for_each_entry(se, &hist_entry__sort_list, list) {
                cmp = se->se_cmp(left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
        struct sort_entry *se;
        int64_t cmp = 0;

        list_for_each_entry(se, &hist_entry__sort_list, list) {
                int64_t (*f)(struct hist_entry *, struct hist_entry *);

                f = se->se_collapse ?: se->se_cmp;

                cmp = f(left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

void hist_entry__free(struct hist_entry *he)
{
        free(he);
}

/*
 * collapse the histogram
 */

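/*
 * Insert 'he' into the collapsed tree keyed by hist_entry__collapse().
 * If an equivalent entry already exists there, fold the stats (and merge
 * the callchains) into it, free 'he' and return false; return true only
 * when 'he' was actually linked into 'root'.
 */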
static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
                                         struct rb_root *root,
                                         struct hist_entry *he)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;
        int64_t cmp;

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = hist_entry__collapse(iter, he);

                if (!cmp) {
                        he_stat__add_stat(&iter->stat, &he->stat);

                        if (symbol_conf.use_callchain) {
                                callchain_cursor_reset(&callchain_cursor);
                                callchain_merge(&callchain_cursor,
                                                iter->callchain,
                                                he->callchain);
                        }
                        hist_entry__free(he);
                        return false;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, root);
        return true;
}

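/*
 * hists->entries_in is double buffered: the rotation below hands the
 * current input tree to the caller (for collapsing) and points new samples
 * at the other element of entries_in_array, so hists->lock is only held
 * briefly.
 */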
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
        struct rb_root *root;

        pthread_mutex_lock(&hists->lock);

        root = hists->entries_in;
        if (++hists->entries_in > &hists->entries_in_array[1])
                hists->entries_in = &hists->entries_in_array[0];

        pthread_mutex_unlock(&hists->lock);

        return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
        hists__filter_entry_by_dso(hists, he);
        hists__filter_entry_by_thread(hists, he);
        hists__filter_entry_by_symbol(hists, he);
}

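/*
 * Drain the current input tree into hists->entries_collapsed, merging
 * entries that compare equal under the collapse rules.  Entries that end
 * up as new nodes get the currently active filters re-applied.
 */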
static void __hists__collapse_resort(struct hists *hists, bool threaded)
{
        struct rb_root *root;
        struct rb_node *next;
        struct hist_entry *n;

        if (!sort__need_collapse && !threaded)
                return;

        root = hists__get_rotate_entries_in(hists);
        next = rb_first(root);

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node_in);
                next = rb_next(&n->rb_node_in);

                rb_erase(&n->rb_node_in, root);
                if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
                        /*
                         * If it wasn't combined with one of the entries already
                         * collapsed, we need to apply the filters that may have
                         * been set by, say, the hist_browser.
                         */
                        hists__apply_filters(hists, n);
                }
        }
}

void hists__collapse_resort(struct hists *hists)
{
        return __hists__collapse_resort(hists, false);
}

void hists__collapse_resort_threaded(struct hists *hists)
{
        return __hists__collapse_resort(hists, true);
}

/*
 * reverse the map, sort on period.
 */

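/*
 * Insert 'he' into the output tree ordered by descending period, so the
 * hottest entries come first.  If callchains are in use, the entry's chain
 * is sorted here with callchain_param.sort(), using min_callchain_hits as
 * the cutoff.
 */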
static void __hists__insert_output_entry(struct rb_root *entries,
                                         struct hist_entry *he,
                                         u64 min_callchain_hits)
{
        struct rb_node **p = &entries->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;

        if (symbol_conf.use_callchain)
                callchain_param.sort(&he->sorted_chain, he->callchain,
                                     min_callchain_hits, &callchain_param);

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node);

                if (he->stat.period > iter->stat.period)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&he->rb_node, parent, p);
        rb_insert_color(&he->rb_node, entries);
}

static void __hists__output_resort(struct hists *hists, bool threaded)
{
        struct rb_root *root;
        struct rb_node *next;
        struct hist_entry *n;
        u64 min_callchain_hits;

        min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

        if (sort__need_collapse || threaded)
                root = &hists->entries_collapsed;
        else
                root = hists->entries_in;

        next = rb_first(root);
        hists->entries = RB_ROOT;

        hists->nr_entries = 0;
        hists->stats.total_period = 0;
        hists__reset_col_len(hists);

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node_in);
                next = rb_next(&n->rb_node_in);

                __hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
                hists__inc_nr_entries(hists, n);
        }
}

void hists__output_resort(struct hists *hists)
{
        return __hists__output_resort(hists, false);
}

void hists__output_resort_threaded(struct hists *hists)
{
        return __hists__output_resort(hists, true);
}

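/*
 * Clear one filter bit on 'h'.  Once no filter bits remain, the entry is
 * counted back into nr_entries, the total period and the sample count,
 * and the column widths are regrown to fit it.
 */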
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
                                       enum hist_filter filter)
{
        h->filtered &= ~(1 << filter);
        if (h->filtered)
                return;

        ++hists->nr_entries;
        if (h->ms.unfolded)
                hists->nr_entries += h->nr_rows;
        h->row_offset = 0;
        hists->stats.total_period += h->stat.period;
        hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->stat.nr_events;

        hists__calc_col_len(hists, h);
}

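/*
 * The three filter pairs below follow the same pattern: the
 * hists__filter_entry_by_*() helper sets the matching HIST_FILTER__* bit
 * on entries that fail the active dso/thread/symbol filter, while
 * hists__filter_by_*() rescans the whole output tree, zeroing the totals
 * and re-accounting every entry that passes.
 */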
static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he)
{
        if (hists->dso_filter != NULL &&
            (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
                he->filtered |= (1 << HIST_FILTER__DSO);
                return true;
        }

        return false;
}

void hists__filter_by_dso(struct hists *hists)
{
        struct rb_node *nd;

        hists->nr_entries = hists->stats.total_period = 0;
        hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (symbol_conf.exclude_other && !h->parent)
                        continue;

                if (hists__filter_entry_by_dso(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
        }
}

static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he)
{
        if (hists->thread_filter != NULL &&
            he->thread != hists->thread_filter) {
                he->filtered |= (1 << HIST_FILTER__THREAD);
                return true;
        }

        return false;
}

void hists__filter_by_thread(struct hists *hists)
{
        struct rb_node *nd;

        hists->nr_entries = hists->stats.total_period = 0;
        hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (hists__filter_entry_by_thread(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
        }
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
                                          struct hist_entry *he)
{
        if (hists->symbol_filter_str != NULL &&
            (!he->ms.sym || strstr(he->ms.sym->name,
                                   hists->symbol_filter_str) == NULL)) {
                he->filtered |= (1 << HIST_FILTER__SYMBOL);
                return true;
        }

        return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
        struct rb_node *nd;

        hists->nr_entries = hists->stats.total_period = 0;
        hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (hists__filter_entry_by_symbol(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
        }
}

int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
{
        return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
}

int hist_entry__annotate(struct hist_entry *he, size_t privsize)
{
        return symbol__annotate(he->ms.sym, he->ms.map, privsize);
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
        ++hists->stats.nr_events[0];
        ++hists->stats.nr_events[type];
}
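
/*
 * Rough life cycle of a hists tree, as these helpers appear to be used by
 * the perf tools (the actual call sites are in the report/top builtins and
 * the UI code, not in this file):
 *
 *   __hists__add_entry() / __hists__add_branch_entry()   per sample
 *   hists__collapse_resort()                             merge equal entries
 *   hists__output_resort()                               sort by period
 *   hists__filter_by_dso()/_thread()/_symbol()           interactive filters
 *   hists__decay_entries()                               aging in live mode
 */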