Linux Kernel  3.7.1
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
builtin-timechart.c
Go to the documentation of this file.
1 /*
2  * builtin-timechart.c - make an svg timechart of system activity
3  *
4  * (C) Copyright 2009 Intel Corporation
5  *
6  * Authors:
7  * Arjan van de Ven <[email protected]>
8  *
9  * This program is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU General Public License
11  * as published by the Free Software Foundation; version 2
12  * of the License.
13  */
14 
15 #include "builtin.h"
16 
17 #include "util/util.h"
18 
19 #include "util/color.h"
20 #include <linux/list.h>
21 #include "util/cache.h"
22 #include "util/evsel.h"
23 #include <linux/rbtree.h>
24 #include "util/symbol.h"
25 #include "util/callchain.h"
26 #include "util/strlist.h"
27 
28 #include "perf.h"
29 #include "util/header.h"
30 #include "util/parse-options.h"
31 #include "util/parse-events.h"
32 #include "util/event.h"
33 #include "util/session.h"
34 #include "util/svghelper.h"
35 #include "util/tool.h"
36 
37 #define SUPPORT_OLD_POWER_EVENTS 1
38 #define PWR_EVENT_EXIT -1
39 
40 
41 static unsigned int numcpus;
42 static u64 min_freq; /* Lowest CPU frequency seen */
43 static u64 max_freq; /* Highest CPU frequency seen */
44 static u64 turbo_frequency;
45 
46 static u64 first_time, last_time;
47 
48 static bool power_only;
49 
50 
51 struct per_pid;
52 struct per_pidcomm;
53 
54 struct cpu_sample;
55 struct power_event;
56 struct wake_event;
57 
58 struct sample_wrapper;
59 
/*
 * Data structure layout:
 * We keep a list of "pid"s, matching the kernel's notion of a task struct.
 * Each "pid" entry has a list of "comm"s.
 * This is because we want to track different programs separately, even
 * though exec will reuse the original pid (by design).
 * Each comm has a list of samples that will be used to draw the
 * final graph.
 */
69 
70 struct per_pid {
71  struct per_pid *next;
72 
73  int pid;
74  int ppid;
75 
79  int display;
80 
81  struct per_pidcomm *all;
83 };
84 
85 
86 struct per_pidcomm {
87  struct per_pidcomm *next;
88 
92 
93  int Y;
94  int display;
95 
96  long state;
98 
99  char *comm;
100 
102 };
103 
106 
108  unsigned char data[0];
109 };
110 
111 #define TYPE_NONE 0
112 #define TYPE_RUNNING 1
113 #define TYPE_WAITING 2
114 #define TYPE_BLOCKED 3
115 
116 struct cpu_sample {
117  struct cpu_sample *next;
118 
121  int type;
122  int cpu;
123 };
124 
125 static struct per_pid *all_data;
126 
127 #define CSTATE 1
128 #define PSTATE 2
129 
130 struct power_event {
131  struct power_event *next;
132  int type;
133  int state;
136  int cpu;
137 };
138 
139 struct wake_event {
140  struct wake_event *next;
141  int waker;
142  int wakee;
144 };
145 
146 static struct power_event *power_events;
147 static struct wake_event *wake_events;
148 
149 struct process_filter;
151  char *name;
152  int pid;
154 };
155 
156 static struct process_filter *process_filter;
157 
158 
159 static struct per_pid *find_create_pid(int pid)
160 {
161  struct per_pid *cursor = all_data;
162 
163  while (cursor) {
164  if (cursor->pid == pid)
165  return cursor;
166  cursor = cursor->next;
167  }
168  cursor = zalloc(sizeof(*cursor));
169  assert(cursor != NULL);
170  cursor->pid = pid;
171  cursor->next = all_data;
172  all_data = cursor;
173  return cursor;
174 }
175 
176 static void pid_set_comm(int pid, char *comm)
177 {
178  struct per_pid *p;
179  struct per_pidcomm *c;
180  p = find_create_pid(pid);
181  c = p->all;
182  while (c) {
183  if (c->comm && strcmp(c->comm, comm) == 0) {
184  p->current = c;
185  return;
186  }
187  if (!c->comm) {
188  c->comm = strdup(comm);
189  p->current = c;
190  return;
191  }
192  c = c->next;
193  }
194  c = zalloc(sizeof(*c));
195  assert(c != NULL);
196  c->comm = strdup(comm);
197  p->current = c;
198  c->next = p->all;
199  p->all = c;
200 }
201 
/*
 * Record a fork: create (or find) entries for child and parent, let the
 * child inherit the parent's current comm, and start the child's
 * timeline at @timestamp.
 */
static void pid_fork(int pid, int ppid, u64 timestamp)
{
	struct per_pid *p, *pp;
	p = find_create_pid(pid);
	pp = find_create_pid(ppid);
	p->ppid = ppid;
	/* inherit the parent's comm unless the child already has one */
	if (pp->current && pp->current->comm && !p->current)
		pid_set_comm(pid, pp->current->comm);

	p->start_time = timestamp;
	if (p->current) {
		p->current->start_time = timestamp;
		p->current->state_since = timestamp;
	}
}
217 
218 static void pid_exit(int pid, u64 timestamp)
219 {
220  struct per_pid *p;
221  p = find_create_pid(pid);
222  p->end_time = timestamp;
223  if (p->current)
224  p->current->end_time = timestamp;
225 }
226 
/*
 * Attach one time sample (a RUNNING/WAITING/BLOCKED interval on @cpu,
 * spanning [@start, @end]) to the current comm of @pid, creating an
 * anonymous comm entry if the pid has none yet.  Well-formed RUNNING
 * intervals are accumulated into the comm's and pid's total CPU time.
 */
static void
pid_put_sample(int pid, int type, unsigned int cpu, u64 start, u64 end)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	struct cpu_sample *sample;

	p = find_create_pid(pid);
	c = p->current;
	if (!c) {
		/* no comm seen yet: create a placeholder with a NULL name;
		 * pid_set_comm() may name it later */
		c = zalloc(sizeof(*c));
		assert(c != NULL);
		p->current = c;
		c->next = p->all;
		p->all = c;
	}

	sample = zalloc(sizeof(*sample));
	assert(sample != NULL);
	sample->start_time = start;
	sample->end_time = end;
	sample->type = type;
	sample->next = c->samples;
	sample->cpu = cpu;
	c->samples = sample;

	/* only count intervals with a sane, nonzero time range */
	if (sample->type == TYPE_RUNNING && end > start && start > 0) {
		c->total_time += (end-start);
		p->total_time += (end-start);
	}

	/* track the earliest timestamp seen for both comm and pid */
	if (c->start_time == 0 || c->start_time > start)
		c->start_time = start;
	if (p->start_time == 0 || p->start_time > start)
		p->start_time = start;
}
263 
264 #define MAX_CPUS 4096
265 
266 static u64 cpus_cstate_start_times[MAX_CPUS];
267 static int cpus_cstate_state[MAX_CPUS];
268 static u64 cpus_pstate_start_times[MAX_CPUS];
269 static u64 cpus_pstate_state[MAX_CPUS];
270 
/* perf_tool callback for PERF_RECORD_COMM: record the task's command name. */
static int process_comm_event(struct perf_tool *tool __maybe_unused,
			      union perf_event *event,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	pid_set_comm(event->comm.tid, event->comm.comm);
	return 0;
}
279 
/* perf_tool callback for PERF_RECORD_FORK: start the child's timeline. */
static int process_fork_event(struct perf_tool *tool __maybe_unused,
			      union perf_event *event,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	pid_fork(event->fork.pid, event->fork.ppid, event->fork.time);
	return 0;
}
288 
/* perf_tool callback for PERF_RECORD_EXIT: close the task's timeline.
 * Exit events reuse the fork event layout, hence event->fork here. */
static int process_exit_event(struct perf_tool *tool __maybe_unused,
			      union perf_event *event,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	pid_exit(event->fork.pid, event->fork.time);
	return 0;
}
297 
298 struct trace_entry {
299  unsigned short type;
300  unsigned char flags;
301  unsigned char preempt_count;
302  int pid;
304 };
305 
306 #ifdef SUPPORT_OLD_POWER_EVENTS
307 static int use_old_power_events;
309  struct trace_entry te;
313 };
314 #endif
315 
317  struct trace_entry te;
320 };
321 
322 #define TASK_COMM_LEN 16
323 struct wakeup_entry {
324  struct trace_entry te;
325  char comm[TASK_COMM_LEN];
326  int pid;
327  int prio;
328  int success;
329 };
330 
331 /*
332  * trace_flag_type is an enumeration that holds different
333  * states when a trace occurs. These are:
334  * IRQS_OFF - interrupts were disabled
335  * IRQS_NOSUPPORT - arch does not support irqs_disabled_flags
336  * NEED_RESCED - reschedule is requested
337  * HARDIRQ - inside an interrupt handler
338  * SOFTIRQ - inside a softirq handler
339  */
346 };
347 
348 
349 
350 struct sched_switch {
351  struct trace_entry te;
353  int prev_pid;
355  long prev_state; /* Arjan weeps. */
357  int next_pid;
359 };
360 
361 static void c_state_start(int cpu, u64 timestamp, int state)
362 {
363  cpus_cstate_start_times[cpu] = timestamp;
364  cpus_cstate_state[cpu] = state;
365 }
366 
367 static void c_state_end(int cpu, u64 timestamp)
368 {
369  struct power_event *pwr = zalloc(sizeof(*pwr));
370 
371  if (!pwr)
372  return;
373 
374  pwr->state = cpus_cstate_state[cpu];
375  pwr->start_time = cpus_cstate_start_times[cpu];
376  pwr->end_time = timestamp;
377  pwr->cpu = cpu;
378  pwr->type = CSTATE;
379  pwr->next = power_events;
380 
381  power_events = pwr;
382 }
383 
/*
 * Handle a CPU frequency (P-state) change on @cpu: close out the
 * interval that ran at the previous frequency as a PSTATE power_event,
 * then record the new frequency and update the global min/max/turbo
 * frequency statistics.
 */
static void p_state_change(int cpu, u64 timestamp, u64 new_freq)
{
	struct power_event *pwr;

	if (new_freq > 8000000) /* detect invalid data */
		return;

	pwr = zalloc(sizeof(*pwr));
	if (!pwr)
		return;

	/* the queued event describes the interval at the OLD frequency */
	pwr->state = cpus_pstate_state[cpu];
	pwr->start_time = cpus_pstate_start_times[cpu];
	pwr->end_time = timestamp;
	pwr->cpu = cpu;
	pwr->type = PSTATE;
	pwr->next = power_events;

	/* first event for this cpu: the interval began at trace start */
	if (!pwr->start_time)
		pwr->start_time = first_time;

	power_events = pwr;

	cpus_pstate_state[cpu] = new_freq;
	cpus_pstate_start_times[cpu] = timestamp;

	if ((u64)new_freq > max_freq)
		max_freq = new_freq;

	if (new_freq < min_freq || min_freq == 0)
		min_freq = new_freq;

	/* heuristic: a frequency exactly 1000 units below the maximum
	 * presumably marks the non-turbo ceiling, so the maximum itself
	 * is the turbo frequency -- confirm against cpufreq semantics */
	if (new_freq == max_freq - 1000)
		turbo_frequency = max_freq;
}
419 
/*
 * Handle a sched:sched_wakeup tracepoint: queue a wake_event linking
 * the waker to the wakee, and transition the wakee's current comm into
 * the WAITING (runnable, not running) state.
 */
static void
sched_wakeup(int cpu, u64 timestamp, int pid, struct trace_entry *te)
{
	struct per_pid *p;
	struct wakeup_entry *wake = (void *)te;
	struct wake_event *we = zalloc(sizeof(*we));

	if (!we)
		return;

	we->time = timestamp;
	we->waker = pid;

	/* wakeups issued from (soft)irq context have no meaningful waker
	 * task; -1 makes draw_wakeups() render an interrupt marker */
	if ((te->flags & TRACE_FLAG_HARDIRQ) || (te->flags & TRACE_FLAG_SOFTIRQ))
		we->waker = -1;

	we->wakee = wake->pid;
	we->next = wake_events;
	wake_events = we;
	p = find_create_pid(we->wakee);

	/* a task with no known state starts WAITING at this wakeup */
	if (p && p->current && p->current->state == TYPE_NONE) {
		p->current->state_since = timestamp;
		p->current->state = TYPE_WAITING;
	}
	/* a BLOCKED task finishes its blocked interval and becomes WAITING */
	if (p && p->current && p->current->state == TYPE_BLOCKED) {
		pid_put_sample(p->pid, p->current->state, cpu, p->current->state_since, timestamp);
		p->current->state_since = timestamp;
		p->current->state = TYPE_WAITING;
	}
}
451 
/*
 * Handle a sched:sched_switch tracepoint: close the RUNNING interval of
 * the task leaving the CPU and whatever interval the incoming task was
 * in, mark the incoming task RUNNING, and classify the outgoing task as
 * BLOCKED or WAITING based on its prev_state.
 */
static void sched_switch(int cpu, u64 timestamp, struct trace_entry *te)
{
	struct per_pid *p = NULL, *prev_p;
	struct sched_switch *sw = (void *)te;


	prev_p = find_create_pid(sw->prev_pid);

	p = find_create_pid(sw->next_pid);

	if (prev_p->current && prev_p->current->state != TYPE_NONE)
		pid_put_sample(sw->prev_pid, TYPE_RUNNING, cpu, prev_p->current->state_since, timestamp);
	if (p && p->current) {
		if (p->current->state != TYPE_NONE)
			pid_put_sample(sw->next_pid, p->current->state, cpu, p->current->state_since, timestamp);

		p->current->state_since = timestamp;
		p->current->state = TYPE_RUNNING;
	}

	if (prev_p->current) {
		prev_p->current->state = TYPE_NONE;
		prev_p->current->state_since = timestamp;
		/* bit 0x2 of prev_state is presumably TASK_UNINTERRUPTIBLE
		 * (blocked on I/O) -- confirm against the kernel's TASK_*
		 * state flags for this trace format */
		if (sw->prev_state & 2)
			prev_p->current->state = TYPE_BLOCKED;
		/* prev_state == 0 (TASK_RUNNING): still runnable, i.e.
		 * the task was preempted and is merely waiting for a CPU */
		if (sw->prev_state == 0)
			prev_p->current->state = TYPE_WAITING;
	}
}
481 
482 
/*
 * perf_tool sample callback: maintain the global first/last timestamp
 * window and dispatch raw tracepoint payloads to the power and sched
 * handlers based on the tracepoint's resolved name.
 */
static int process_sample_event(struct perf_tool *tool __maybe_unused,
				union perf_event *event __maybe_unused,
				struct perf_sample *sample,
				struct perf_evsel *evsel,
				struct machine *machine __maybe_unused)
{
	struct trace_entry *te;

	/* keep the trace time window in sync with every timed sample */
	if (evsel->attr.sample_type & PERF_SAMPLE_TIME) {
		if (!first_time || first_time > sample->time)
			first_time = sample->time;
		if (last_time < sample->time)
			last_time = sample->time;
	}

	te = (void *)sample->raw_data;
	if ((evsel->attr.sample_type & PERF_SAMPLE_RAW) && sample->raw_size > 0) {
		char *event_str;
#ifdef SUPPORT_OLD_POWER_EVENTS
		struct power_entry_old *peo;
		peo = (void *)te;
#endif
		/*
		 * FIXME: use evsel, its already mapped from id to perf_evsel,
		 * remove perf_header__find_event infrastructure bits.
		 * Mapping all these "power:cpu_idle" strings to the tracepoint
		 * ID and then just comparing against evsel->attr.config.
		 *
		 * e.g.:
		 *
		 * if (evsel->attr.config == power_cpu_idle_id)
		 */
		event_str = perf_header__find_event(te->type);

		if (!event_str)
			return 0;

		/* numcpus tracks the highest cpu id seen; write_svg_file()
		 * converts it into a count later */
		if (sample->cpu > numcpus)
			numcpus = sample->cpu;

		if (strcmp(event_str, "power:cpu_idle") == 0) {
			struct power_processor_entry *ppe = (void *)te;
			/* PWR_EVENT_EXIT marks leaving idle; any other
			 * state value marks entering that C-state */
			if (ppe->state == (u32)PWR_EVENT_EXIT)
				c_state_end(ppe->cpu_id, sample->time);
			else
				c_state_start(ppe->cpu_id, sample->time,
					      ppe->state);
		}
		else if (strcmp(event_str, "power:cpu_frequency") == 0) {
			struct power_processor_entry *ppe = (void *)te;
			p_state_change(ppe->cpu_id, sample->time, ppe->state);
		}

		else if (strcmp(event_str, "sched:sched_wakeup") == 0)
			sched_wakeup(sample->cpu, sample->time, sample->pid, te);

		else if (strcmp(event_str, "sched:sched_switch") == 0)
			sched_switch(sample->cpu, sample->time, te);

#ifdef SUPPORT_OLD_POWER_EVENTS
		/* older kernels exposed power_start/power_end/power_frequency
		 * instead of cpu_idle/cpu_frequency */
		if (use_old_power_events) {
			if (strcmp(event_str, "power:power_start") == 0)
				c_state_start(peo->cpu_id, sample->time,
					      peo->value);

			else if (strcmp(event_str, "power:power_end") == 0)
				c_state_end(sample->cpu, sample->time);

			else if (strcmp(event_str,
					"power:power_frequency") == 0)
				p_state_change(peo->cpu_id, sample->time,
					       peo->value);
		}
#endif
	}
	return 0;
}
560 
561 /*
562  * After the last sample we need to wrap up the current C/P state
563  * and close out each CPU for these.
564  */
static void end_sample_processing(void)
{
	u64 cpu;
	struct power_event *pwr;

	/* numcpus still holds the highest cpu id here, so iterate
	 * inclusively over 0..numcpus */
	for (cpu = 0; cpu <= numcpus; cpu++) {
		/* C state */
#if 0
		/* closing the final C-state interval is disabled --
		 * presumably it produced misleading trailing idle blocks;
		 * kept for reference */
		pwr = zalloc(sizeof(*pwr));
		if (!pwr)
			return;

		pwr->state = cpus_cstate_state[cpu];
		pwr->start_time = cpus_cstate_start_times[cpu];
		pwr->end_time = last_time;
		pwr->cpu = cpu;
		pwr->type = CSTATE;
		pwr->next = power_events;

		power_events = pwr;
#endif
		/* P state */

		pwr = zalloc(sizeof(*pwr));
		if (!pwr)
			return;

		/* close the still-open P-state interval at trace end */
		pwr->state = cpus_pstate_state[cpu];
		pwr->start_time = cpus_pstate_start_times[cpu];
		pwr->end_time = last_time;
		pwr->cpu = cpu;
		pwr->type = PSTATE;
		pwr->next = power_events;

		/* no frequency event ever seen: span the whole trace and
		 * fall back to the lowest frequency observed anywhere */
		if (!pwr->start_time)
			pwr->start_time = first_time;
		if (!pwr->state)
			pwr->state = min_freq;
		power_events = pwr;
	}
}
606 
607 /*
608  * Sort the pid datastructure
609  */
610 static void sort_pids(void)
611 {
612  struct per_pid *new_list, *p, *cursor, *prev;
613  /* sort by ppid first, then by pid, lowest to highest */
614 
615  new_list = NULL;
616 
617  while (all_data) {
618  p = all_data;
619  all_data = p->next;
620  p->next = NULL;
621 
622  if (new_list == NULL) {
623  new_list = p;
624  p->next = NULL;
625  continue;
626  }
627  prev = NULL;
628  cursor = new_list;
629  while (cursor) {
630  if (cursor->ppid > p->ppid ||
631  (cursor->ppid == p->ppid && cursor->pid > p->pid)) {
632  /* must insert before */
633  if (prev) {
634  p->next = prev->next;
635  prev->next = p;
636  cursor = NULL;
637  continue;
638  } else {
639  p->next = new_list;
640  new_list = p;
641  cursor = NULL;
642  continue;
643  }
644  }
645 
646  prev = cursor;
647  cursor = cursor->next;
648  if (!cursor)
649  prev->next = p;
650  }
651  }
652  all_data = new_list;
653 }
654 
655 
656 static void draw_c_p_states(void)
657 {
658  struct power_event *pwr;
659  pwr = power_events;
660 
661  /*
662  * two pass drawing so that the P state bars are on top of the C state blocks
663  */
664  while (pwr) {
665  if (pwr->type == CSTATE)
666  svg_cstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
667  pwr = pwr->next;
668  }
669 
670  pwr = power_events;
671  while (pwr) {
672  if (pwr->type == PSTATE) {
673  if (!pwr->state)
674  pwr->state = min_freq;
675  svg_pstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
676  }
677  pwr = pwr->next;
678  }
679 }
680 
681 static void draw_wakeups(void)
682 {
683  struct wake_event *we;
684  struct per_pid *p;
685  struct per_pidcomm *c;
686 
687  we = wake_events;
688  while (we) {
689  int from = 0, to = 0;
690  char *task_from = NULL, *task_to = NULL;
691 
692  /* locate the column of the waker and wakee */
693  p = all_data;
694  while (p) {
695  if (p->pid == we->waker || p->pid == we->wakee) {
696  c = p->all;
697  while (c) {
698  if (c->Y && c->start_time <= we->time && c->end_time >= we->time) {
699  if (p->pid == we->waker && !from) {
700  from = c->Y;
701  task_from = strdup(c->comm);
702  }
703  if (p->pid == we->wakee && !to) {
704  to = c->Y;
705  task_to = strdup(c->comm);
706  }
707  }
708  c = c->next;
709  }
710  c = p->all;
711  while (c) {
712  if (p->pid == we->waker && !from) {
713  from = c->Y;
714  task_from = strdup(c->comm);
715  }
716  if (p->pid == we->wakee && !to) {
717  to = c->Y;
718  task_to = strdup(c->comm);
719  }
720  c = c->next;
721  }
722  }
723  p = p->next;
724  }
725 
726  if (!task_from) {
727  task_from = malloc(40);
728  sprintf(task_from, "[%i]", we->waker);
729  }
730  if (!task_to) {
731  task_to = malloc(40);
732  sprintf(task_to, "[%i]", we->wakee);
733  }
734 
735  if (we->waker == -1)
736  svg_interrupt(we->time, to);
737  else if (from && to && abs(from - to) == 1)
738  svg_wakeline(we->time, from, to);
739  else
740  svg_partial_wakeline(we->time, from, task_from, to, task_to);
741  we = we->next;
742 
743  free(task_from);
744  free(task_to);
745  }
746 }
747 
748 static void draw_cpu_usage(void)
749 {
750  struct per_pid *p;
751  struct per_pidcomm *c;
752  struct cpu_sample *sample;
753  p = all_data;
754  while (p) {
755  c = p->all;
756  while (c) {
757  sample = c->samples;
758  while (sample) {
759  if (sample->type == TYPE_RUNNING)
760  svg_process(sample->cpu, sample->start_time, sample->end_time, "sample", c->comm);
761 
762  sample = sample->next;
763  }
764  c = c->next;
765  }
766  p = p->next;
767  }
768 }
769 
770 static void draw_process_bars(void)
771 {
772  struct per_pid *p;
773  struct per_pidcomm *c;
774  struct cpu_sample *sample;
775  int Y = 0;
776 
777  Y = 2 * numcpus + 2;
778 
779  p = all_data;
780  while (p) {
781  c = p->all;
782  while (c) {
783  if (!c->display) {
784  c->Y = 0;
785  c = c->next;
786  continue;
787  }
788 
789  svg_box(Y, c->start_time, c->end_time, "process");
790  sample = c->samples;
791  while (sample) {
792  if (sample->type == TYPE_RUNNING)
793  svg_sample(Y, sample->cpu, sample->start_time, sample->end_time);
794  if (sample->type == TYPE_BLOCKED)
795  svg_box(Y, sample->start_time, sample->end_time, "blocked");
796  if (sample->type == TYPE_WAITING)
797  svg_waiting(Y, sample->start_time, sample->end_time);
798  sample = sample->next;
799  }
800 
801  if (c->comm) {
802  char comm[256];
803  if (c->total_time > 5000000000) /* 5 seconds */
804  sprintf(comm, "%s:%i (%2.2fs)", c->comm, p->pid, c->total_time / 1000000000.0);
805  else
806  sprintf(comm, "%s:%i (%3.1fms)", c->comm, p->pid, c->total_time / 1000000.0);
807 
808  svg_text(Y, c->start_time, comm);
809  }
810  c->Y = Y;
811  Y++;
812  c = c->next;
813  }
814  p = p->next;
815  }
816 }
817 
818 static void add_process_filter(const char *string)
819 {
820  int pid = strtoull(string, NULL, 10);
821  struct process_filter *filt = malloc(sizeof(*filt));
822 
823  if (!filt)
824  return;
825 
826  filt->name = strdup(string);
827  filt->pid = pid;
828  filt->next = process_filter;
829 
830  process_filter = filt;
831 }
832 
833 static int passes_filter(struct per_pid *p, struct per_pidcomm *c)
834 {
835  struct process_filter *filt;
836  if (!process_filter)
837  return 1;
838 
839  filt = process_filter;
840  while (filt) {
841  if (filt->pid && p->pid == filt->pid)
842  return 1;
843  if (strcmp(filt->name, c->comm) == 0)
844  return 1;
845  filt = filt->next;
846  }
847  return 0;
848 }
849 
850 static int determine_display_tasks_filtered(void)
851 {
852  struct per_pid *p;
853  struct per_pidcomm *c;
854  int count = 0;
855 
856  p = all_data;
857  while (p) {
858  p->display = 0;
859  if (p->start_time == 1)
860  p->start_time = first_time;
861 
862  /* no exit marker, task kept running to the end */
863  if (p->end_time == 0)
864  p->end_time = last_time;
865 
866  c = p->all;
867 
868  while (c) {
869  c->display = 0;
870 
871  if (c->start_time == 1)
872  c->start_time = first_time;
873 
874  if (passes_filter(p, c)) {
875  c->display = 1;
876  p->display = 1;
877  count++;
878  }
879 
880  if (c->end_time == 0)
881  c->end_time = last_time;
882 
883  c = c->next;
884  }
885  p = p->next;
886  }
887  return count;
888 }
889 
/*
 * Select tasks for display: defer to the user's filters if any are
 * registered, otherwise show every pid/comm whose accumulated run time
 * reaches @threshold (suppressed entirely by -P/power_only).  Returns
 * the number of comms selected.
 */
static int determine_display_tasks(u64 threshold)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	int count = 0;

	if (process_filter)
		return determine_display_tasks_filtered();

	p = all_data;
	while (p) {
		p->display = 0;
		/* start_time == 1 is presumably a "began before the trace"
		 * sentinel; clamp it to the trace start -- confirm */
		if (p->start_time == 1)
			p->start_time = first_time;

		/* no exit marker, task kept running to the end */
		if (p->end_time == 0)
			p->end_time = last_time;
		if (p->total_time >= threshold && !power_only)
			p->display = 1;

		c = p->all;

		while (c) {
			c->display = 0;

			if (c->start_time == 1)
				c->start_time = first_time;

			if (c->total_time >= threshold && !power_only) {
				c->display = 1;
				count++;
			}

			if (c->end_time == 0)
				c->end_time = last_time;

			c = c->next;
		}
		p = p->next;
	}
	return count;
}
933 
934 
935 
936 #define TIME_THRESH 10000000
937 
/*
 * Produce the final SVG: decide which tasks to show, then draw the time
 * grid, legend, per-CPU boxes, CPU usage, process bars, C/P states and
 * wakeup arrows into @filename.
 */
static void write_svg_file(const char *filename)
{
	u64 i;
	int count;

	/* numcpus tracked the highest cpu id; convert it to a cpu count */
	numcpus++;


	count = determine_display_tasks(TIME_THRESH);

	/* We'd like to show at least 15 tasks; be less picky if we have fewer */
	if (count < 15)
		count = determine_display_tasks(TIME_THRESH / 10);

	open_svg(filename, numcpus, count, first_time, last_time);

	svg_time_grid();
	svg_legenda();

	for (i = 0; i < numcpus; i++)
		svg_cpu_box(i, max_freq, turbo_frequency);

	draw_cpu_usage();
	draw_process_bars();
	draw_c_p_states();
	draw_wakeups();

	svg_close();
}
967 
/*
 * Read the recorded events from @input_name, process them through the
 * perf_tool callbacks above, and write the timechart SVG to
 * @output_name.  Returns 0 on success or a negative error code.
 */
static int __cmd_timechart(const char *input_name, const char *output_name)
{
	struct perf_tool perf_timechart = {
		.comm		 = process_comm_event,
		.fork		 = process_fork_event,
		.exit		 = process_exit_event,
		.sample		 = process_sample_event,
		.ordered_samples = true,	/* deliver samples in time order */
	};
	struct perf_session *session = perf_session__new(input_name, O_RDONLY,
							 0, false, &perf_timechart);
	int ret = -EINVAL;

	if (session == NULL)
		return -ENOMEM;

	/* a data file without tracepoint data is useless to us */
	if (!perf_session__has_traces(session, "timechart record"))
		goto out_delete;

	ret = perf_session__process_events(session, &perf_timechart);
	if (ret)
		goto out_delete;

	/* close still-open C/P-state intervals at the end of the trace */
	end_sample_processing();

	sort_pids();

	write_svg_file(output_name);

	pr_info("Written %2.1f seconds of trace to %s.\n",
		(last_time - first_time) / 1000000000.0, output_name);
out_delete:
	perf_session__delete(session);
	return ret;
}
1003 
/*
 * "perf timechart record": build an argv for the generic "perf record"
 * command with the tracepoints the timechart needs, append the user's
 * extra arguments, and hand off to cmd_record().
 */
static int __cmd_record(int argc, const char **argv)
{
#ifdef SUPPORT_OLD_POWER_EVENTS
	/* event set for kernels lacking power:cpu_idle/cpu_frequency */
	const char * const record_old_args[] = {
		"record", "-a", "-R", "-f", "-c", "1",
		"-e", "power:power_start",
		"-e", "power:power_end",
		"-e", "power:power_frequency",
		"-e", "sched:sched_wakeup",
		"-e", "sched:sched_switch",
	};
#endif
	const char * const record_new_args[] = {
		"record", "-a", "-R", "-f", "-c", "1",
		"-e", "power:cpu_frequency",
		"-e", "power:cpu_idle",
		"-e", "sched:sched_wakeup",
		"-e", "sched:sched_switch",
	};
	unsigned int rec_argc, i, j;
	const char **rec_argv;
	const char * const *record_args = record_new_args;
	unsigned int record_elems = ARRAY_SIZE(record_new_args);

#ifdef SUPPORT_OLD_POWER_EVENTS
	/* probe which power event names this kernel actually provides */
	if (!is_valid_tracepoint("power:cpu_idle") &&
	    is_valid_tracepoint("power:power_start")) {
		use_old_power_events = 1;
		record_args = record_old_args;
		record_elems = ARRAY_SIZE(record_old_args);
	}
#endif

	/* built-in args plus the user's argv minus its leading "record" */
	rec_argc = record_elems + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	if (rec_argv == NULL)
		return -ENOMEM;

	for (i = 0; i < record_elems; i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	/* rec_argv and its strdup'd strings are handed to cmd_record()
	 * for the remainder of the process; presumably that is why they
	 * are never freed here */
	return cmd_record(i, rec_argv, NULL);
}
1051 
1052 static int
1053 parse_process(const struct option *opt __maybe_unused, const char *arg,
1054  int __maybe_unused unset)
1055 {
1056  if (arg)
1057  add_process_filter(arg);
1058  return 0;
1059 }
1060 
1061 int cmd_timechart(int argc, const char **argv,
1062  const char *prefix __maybe_unused)
1063 {
1064  const char *input_name;
1065  const char *output_name = "output.svg";
1066  const struct option options[] = {
1067  OPT_STRING('i', "input", &input_name, "file", "input file name"),
1068  OPT_STRING('o', "output", &output_name, "file", "output file name"),
1069  OPT_INTEGER('w', "width", &svg_page_width, "page width"),
1070  OPT_BOOLEAN('P', "power-only", &power_only, "output power data only"),
1071  OPT_CALLBACK('p', "process", NULL, "process",
1072  "process selector. Pass a pid or process name.",
1073  parse_process),
1074  OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
1075  "Look for files with symbols relative to this directory"),
1076  OPT_END()
1077  };
1078  const char * const timechart_usage[] = {
1079  "perf timechart [<options>] {record}",
1080  NULL
1081  };
1082 
1083  argc = parse_options(argc, argv, options, timechart_usage,
1085 
1086  symbol__init();
1087 
1088  if (argc && !strncmp(argv[0], "rec", 3))
1089  return __cmd_record(argc, argv);
1090  else if (argc)
1091  usage_with_options(timechart_usage, options);
1092 
1093  setup_pager();
1094 
1095  return __cmd_timechart(input_name, output_name);
1096 }