Commit ce74f60eab3cc8b7a3b0cb9c29ec9b1e1abac7d2

Authored by Namhyung Kim
Committed by Arnaldo Carvalho de Melo
1 parent 9afcf930b1

perf hists: Link hist entries before inserting to an output tree

For matching and/or linking hist entries, they need to be sorted by
given sort keys.  However, the current hists__match/link did this on the
output trees, so the entries in the output tree needed to be resorted
before doing it.

This is not ideal, since we already have trees for collecting or collapsing
entries before passing them to an output tree and they're already sorted
by the given sort keys.  Since we don't need to print anything at the
time of matching/linking, we can use these internal trees directly
instead of bothering with double resort on the output tree.

Its only user - at the time of this writing - perf diff can be easily
converted to use the internal tree and can save some lines too by
getting rid of unnecessary resorting code.

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Acked-by: Jiri Olsa <jolsa@redhat.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/r/1355128197-18193-3-git-send-email-namhyung@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>

Showing 2 changed files with 54 additions and 60 deletions Side-by-side Diff

tools/perf/builtin-diff.c
... ... @@ -275,43 +275,6 @@
275 275 .ordering_requires_timestamps = true,
276 276 };
277 277  
278   -static void insert_hist_entry_by_name(struct rb_root *root,
279   - struct hist_entry *he)
280   -{
281   - struct rb_node **p = &root->rb_node;
282   - struct rb_node *parent = NULL;
283   - struct hist_entry *iter;
284   -
285   - while (*p != NULL) {
286   - parent = *p;
287   - iter = rb_entry(parent, struct hist_entry, rb_node);
288   - if (hist_entry__cmp(iter, he) < 0)
289   - p = &(*p)->rb_left;
290   - else
291   - p = &(*p)->rb_right;
292   - }
293   -
294   - rb_link_node(&he->rb_node, parent, p);
295   - rb_insert_color(&he->rb_node, root);
296   -}
297   -
298   -static void hists__name_resort(struct hists *self)
299   -{
300   - struct rb_root tmp = RB_ROOT;
301   - struct rb_node *next = rb_first(&self->entries);
302   -
303   - while (next != NULL) {
304   - struct hist_entry *n = rb_entry(next, struct hist_entry, rb_node);
305   -
306   - next = rb_next(&n->rb_node);
307   -
308   - rb_erase(&n->rb_node, &self->entries);
309   - insert_hist_entry_by_name(&tmp, n);
310   - }
311   -
312   - self->entries = tmp;
313   -}
314   -
315 278 static struct perf_evsel *evsel_match(struct perf_evsel *evsel,
316 279 struct perf_evlist *evlist)
317 280 {
318 281  
319 282  
320 283  
321 284  
322 285  
323 286  
... ... @@ -324,30 +287,34 @@
324 287 return NULL;
325 288 }
326 289  
327   -static void perf_evlist__resort_hists(struct perf_evlist *evlist, bool name)
  290 +static void perf_evlist__collapse_resort(struct perf_evlist *evlist)
328 291 {
329 292 struct perf_evsel *evsel;
330 293  
331 294 list_for_each_entry(evsel, &evlist->entries, node) {
332 295 struct hists *hists = &evsel->hists;
333 296  
334   - hists__output_resort(hists);
335   -
336   - if (name)
337   - hists__name_resort(hists);
  297 + hists__collapse_resort(hists);
338 298 }
339 299 }
340 300  
341 301 static void hists__baseline_only(struct hists *hists)
342 302 {
343   - struct rb_node *next = rb_first(&hists->entries);
  303 + struct rb_root *root;
  304 + struct rb_node *next;
344 305  
  306 + if (sort__need_collapse)
  307 + root = &hists->entries_collapsed;
  308 + else
  309 + root = hists->entries_in;
  310 +
  311 + next = rb_first(root);
345 312 while (next != NULL) {
346   - struct hist_entry *he = rb_entry(next, struct hist_entry, rb_node);
  313 + struct hist_entry *he = rb_entry(next, struct hist_entry, rb_node_in);
347 314  
348   - next = rb_next(&he->rb_node);
  315 + next = rb_next(&he->rb_node_in);
349 316 if (!hist_entry__next_pair(he)) {
350   - rb_erase(&he->rb_node, &hists->entries);
  317 + rb_erase(&he->rb_node_in, root);
351 318 hist_entry__free(he);
352 319 }
353 320 }
... ... @@ -471,6 +438,8 @@
471 438 else
472 439 hists__link(new, old);
473 440  
  441 + hists__output_resort(new);
  442 +
474 443 if (sort_compute) {
475 444 hists__precompute(new);
476 445 hists__compute_resort(new);
... ... @@ -505,8 +474,8 @@
505 474 evlist_old = older->evlist;
506 475 evlist_new = newer->evlist;
507 476  
508   - perf_evlist__resort_hists(evlist_old, true);
509   - perf_evlist__resort_hists(evlist_new, false);
  477 + perf_evlist__collapse_resort(evlist_old);
  478 + perf_evlist__collapse_resort(evlist_new);
510 479  
511 480 list_for_each_entry(evsel, &evlist_new->entries, node) {
512 481 struct perf_evsel *evsel_old;
tools/perf/util/hist.c
... ... @@ -726,16 +726,24 @@
726 726 static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
727 727 struct hist_entry *pair)
728 728 {
729   - struct rb_node **p = &hists->entries.rb_node;
  729 + struct rb_root *root;
  730 + struct rb_node **p;
730 731 struct rb_node *parent = NULL;
731 732 struct hist_entry *he;
732 733 int cmp;
733 734  
  735 + if (sort__need_collapse)
  736 + root = &hists->entries_collapsed;
  737 + else
  738 + root = hists->entries_in;
  739 +
  740 + p = &root->rb_node;
  741 +
734 742 while (*p != NULL) {
735 743 parent = *p;
736   - he = rb_entry(parent, struct hist_entry, rb_node);
  744 + he = rb_entry(parent, struct hist_entry, rb_node_in);
737 745  
738   - cmp = hist_entry__cmp(he, pair);
  746 + cmp = hist_entry__collapse(he, pair);
739 747  
740 748 if (!cmp)
741 749 goto out;
... ... @@ -750,8 +758,8 @@
750 758 if (he) {
751 759 memset(&he->stat, 0, sizeof(he->stat));
752 760 he->hists = hists;
753   - rb_link_node(&he->rb_node, parent, p);
754   - rb_insert_color(&he->rb_node, &hists->entries);
  761 + rb_link_node(&he->rb_node_in, parent, p);
  762 + rb_insert_color(&he->rb_node_in, root);
755 763 hists__inc_nr_entries(hists, he);
756 764 }
757 765 out:
758 766  
759 767  
... ... @@ -761,11 +769,16 @@
761 769 static struct hist_entry *hists__find_entry(struct hists *hists,
762 770 struct hist_entry *he)
763 771 {
764   - struct rb_node *n = hists->entries.rb_node;
  772 + struct rb_node *n;
765 773  
  774 + if (sort__need_collapse)
  775 + n = hists->entries_collapsed.rb_node;
  776 + else
  777 + n = hists->entries_in->rb_node;
  778 +
766 779 while (n) {
767   - struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node);
768   - int64_t cmp = hist_entry__cmp(iter, he);
  780 + struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
  781 + int64_t cmp = hist_entry__collapse(iter, he);
769 782  
770 783 if (cmp < 0)
771 784 n = n->rb_left;
772 785  
... ... @@ -783,11 +796,17 @@
783 796 */
784 797 void hists__match(struct hists *leader, struct hists *other)
785 798 {
  799 + struct rb_root *root;
786 800 struct rb_node *nd;
787 801 struct hist_entry *pos, *pair;
788 802  
789   - for (nd = rb_first(&leader->entries); nd; nd = rb_next(nd)) {
790   - pos = rb_entry(nd, struct hist_entry, rb_node);
  803 + if (sort__need_collapse)
  804 + root = &leader->entries_collapsed;
  805 + else
  806 + root = leader->entries_in;
  807 +
  808 + for (nd = rb_first(root); nd; nd = rb_next(nd)) {
  809 + pos = rb_entry(nd, struct hist_entry, rb_node_in);
791 810 pair = hists__find_entry(other, pos);
792 811  
793 812 if (pair)
794 813  
... ... @@ -802,11 +821,17 @@
802 821 */
803 822 int hists__link(struct hists *leader, struct hists *other)
804 823 {
  824 + struct rb_root *root;
805 825 struct rb_node *nd;
806 826 struct hist_entry *pos, *pair;
807 827  
808   - for (nd = rb_first(&other->entries); nd; nd = rb_next(nd)) {
809   - pos = rb_entry(nd, struct hist_entry, rb_node);
  828 + if (sort__need_collapse)
  829 + root = &other->entries_collapsed;
  830 + else
  831 + root = other->entries_in;
  832 +
  833 + for (nd = rb_first(root); nd; nd = rb_next(nd)) {
  834 + pos = rb_entry(nd, struct hist_entry, rb_node_in);
810 835  
811 836 if (!hist_entry__has_pairs(pos)) {
812 837 pair = hists__add_dummy_entry(leader, pos);