Linux Kernel 3.7.1
evlist.c
/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <[email protected]>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include "debugfs.h"
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include <unistd.h>

#include "parse-events.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

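/*
 * Shorthands for the per-event file descriptor and sample id entries,
 * both kept in xyarrays indexed by (cpu, thread).
 */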
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	evlist->workload.pid = -1;
}

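/*
 * Allocate and initialize an evlist. A minimal lifecycle sketch
 * (illustrative only, error handling trimmed; real callers also set up
 * cpu/thread maps and open the events):
 *
 *	struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
 *
 *	if (evlist != NULL) {
 *		if (perf_evlist__add_default(evlist) == 0)
 *			... open, mmap and consume events ...
 *		perf_evlist__delete(evlist);
 *	}
 */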
struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
				     struct thread_map *threads)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, cpus, threads);

	return evlist;
}

void perf_evlist__config_attrs(struct perf_evlist *evlist,
			       struct perf_record_opts *opts)
{
	struct perf_evsel *evsel, *first;

	if (evlist->cpus->map[0] < 0)
		opts->no_inherit = true;

	first = perf_evlist__first(evlist);

	list_for_each_entry(evsel, &evlist->entries, node) {
		perf_evsel__config(evsel, opts, first);

		if (evlist->nr_entries > 1)
			evsel->attr.sample_type |= PERF_SAMPLE_ID;
	}
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	list_for_each_entry_safe(pos, n, &evlist->entries, node) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	free(evlist->mmap);
	free(evlist->pollfd);
	evlist->mmap = NULL;
	evlist->pollfd = NULL;
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	++evlist->nr_entries;
}

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list,
				   int nr_entries)
{
	list_splice_tail(list, &evlist->entries);
	evlist->nr_entries += nr_entries;
}

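/*
 * The first evsel on @list becomes the group leader: its ->leader stays
 * NULL and every other evsel on the list points back at it.
 */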
void __perf_evlist__set_leader(struct list_head *list)
{
	struct perf_evsel *evsel, *leader;

	leader = list_entry(list->next, struct perf_evsel, node);
	leader->leader = NULL;

	list_for_each_entry(evsel, list, node) {
		if (evsel != leader)
			evsel->leader = leader;
	}
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries)
		__perf_evlist__set_leader(&evlist->entries);
}

int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel;

	event_attr_init(&attr);

	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL)
		goto error;

	/* use strdup() because free(evsel) assumes name is allocated */
	evsel->name = strdup("cycles");
	if (!evsel->name)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;
error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}

static int perf_evlist__add_attrs(struct perf_evlist *evlist,
				  struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

	return 0;

out_delete_partial_list:
	list_for_each_entry_safe(evsel, n, &head, node)
		perf_evsel__delete(evsel);
	return -1;
}

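/*
 * A minimal usage sketch for the function below (the attrs array is
 * illustrative; callers normally go through the
 * perf_evlist__add_default_attrs() wrapper in evlist.h, which supplies
 * ARRAY_SIZE(array)):
 *
 *	struct perf_event_attr attrs[] = {
 *		{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK, },
 *		{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES, },
 *	};
 *
 *	if (__perf_evlist__add_default_attrs(evlist, attrs, ARRAY_SIZE(attrs)) < 0)
 *		return -1;
 */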
int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}

int perf_evlist__add_newtp(struct perf_evlist *evlist,
			   const char *sys, const char *name, void *handler)
{
	struct perf_evsel *evsel;

	evsel = perf_evsel__newtp(sys, name, evlist->nr_entries);
	if (evsel == NULL)
		return -1;

	evsel->handler.func = handler;
	perf_evlist__add(evlist, evsel);
	return 0;
}

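/*
 * Stop counting: issue PERF_EVENT_IOC_DISABLE (and, in the enable
 * counterpart below, PERF_EVENT_IOC_ENABLE) on every event fd across
 * all cpu x thread combinations.
 */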
void perf_evlist__disable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			for (thread = 0; thread < evlist->threads->nr; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_DISABLE, 0);
		}
	}
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;

	for (cpu = 0; cpu < cpu_map__nr(evlist->cpus); cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			for (thread = 0; thread < evlist->threads->nr; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_ENABLE, 0);
		}
	}
}

static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nfds = cpu_map__nr(evlist->cpus) * evlist->threads->nr * evlist->nr_entries;
	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
	return evlist->pollfd != NULL ? 0 : -ENOMEM;
}

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	fcntl(fd, F_SETFL, O_NONBLOCK);
	evlist->pollfd[evlist->nr_fds].fd = fd;
	evlist->pollfd[evlist->nr_fds].events = POLLIN;
	evlist->nr_fds++;
}

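/*
 * Map kernel-assigned sample ids back to their evsel: each id is hashed
 * into evlist->heads so perf_evlist__id2evsel() can resolve the evsel
 * for a sample without scanning the whole list.
 */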
static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

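/*
 * Obtain the event id via read(2): with PERF_FORMAT_ID set, a read on
 * the fd returns { value, time_enabled?, time_running?, id }, so the
 * index of the id word is bumped once for each TOTAL_TIME_* bit set in
 * read_format.
 */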
static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	perf_evlist__id_add(evlist, evsel, cpu, thread, read_data[id_idx]);
	return 0;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct perf_sample_id *sid;
	int hash;

	if (evlist->nr_entries == 1)
		return perf_evlist__first(evlist);

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, pos, head, node)
		if (sid->id == id)
			return sid->evsel;

	if (!perf_evlist__sample_id_all(evlist))
		return perf_evlist__first(evlist);

	return NULL;
}

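/*
 * Fetch the next event from the ring buffer at @idx: the kernel's write
 * position comes from perf_mmap__read_head(), md->prev remembers how far
 * we have consumed, and an event that wraps past the end of the buffer
 * is stitched together in evlist->event_copy.
 */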
union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	/* XXX Move this to perf.c, making it generally available */
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &evlist->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &evlist->event_copy;
		}

		old += size;
	}

	md->prev = old;

	if (!evlist->overwrite)
		perf_mmap__write_tail(md, old);

	return event;
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		if (evlist->mmap[i].base != NULL) {
			munmap(evlist->mmap[i].base, evlist->mmap_len);
			evlist->mmap[i].base = NULL;
		}
	}

	free(evlist->mmap);
	evlist->mmap = NULL;
}

static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
	if (cpu_map__all(evlist->cpus))
		evlist->nr_mmaps = evlist->threads->nr;
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}

static int __perf_evlist__mmap(struct perf_evlist *evlist,
			       int idx, int prot, int mask, int fd)
{
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED) {
		evlist->mmap[idx].base = NULL;
		return -1;
	}

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}

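/*
 * One ring buffer per cpu: the first fd on each cpu gets mmap'ed and the
 * remaining events are redirected into that buffer with
 * PERF_EVENT_IOC_SET_OUTPUT.
 */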
static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int cpu, thread;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		int output = -1;

		for (thread = 0; thread < evlist->threads->nr; thread++) {
			list_for_each_entry(evsel, &evlist->entries, node) {
				int fd = FD(evsel, cpu, thread);

				if (output == -1) {
					output = fd;
					if (__perf_evlist__mmap(evlist, cpu,
								prot, mask, output) < 0)
						goto out_unmap;
				} else {
					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
						goto out_unmap;
				}

				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		if (evlist->mmap[cpu].base != NULL) {
			munmap(evlist->mmap[cpu].base, evlist->mmap_len);
			evlist->mmap[cpu].base = NULL;
		}
	}
	return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int thread;

	for (thread = 0; thread < evlist->threads->nr; thread++) {
		int output = -1;

		list_for_each_entry(evsel, &evlist->entries, node) {
			int fd = FD(evsel, 0, thread);

			if (output == -1) {
				output = fd;
				if (__perf_evlist__mmap(evlist, thread,
							prot, mask, output) < 0)
					goto out_unmap;
			} else {
				if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
					goto out_unmap;
			}

			if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
			    perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (thread = 0; thread < evlist->threads->nr; thread++) {
		if (evlist->mmap[thread].base != NULL) {
			munmap(evlist->mmap[thread].base, evlist->mmap_len);
			evlist->mmap[thread].base = NULL;
		}
	}
	return -1;
}

/** perf_evlist__mmap - Create per cpu maps to receive events
 *
 * @evlist - list of events
 * @pages - map length in pages
 * @overwrite - overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 *	struct perf_mmap *m = &evlist->mmap[cpu];
 *	unsigned int head = perf_mmap__read_head(m);
 *
 *	perf_mmap__write_tail(m, head)
 *
 * Using perf_evlist__read_on_cpu does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite)
{
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;

	/* 512 kiB: default amount of unprivileged mlocked memory */
	if (pages == UINT_MAX)
		pages = (512 * 1024) / page_size;
	else if (!is_power_of_2(pages))
		return -EINVAL;

	mask = pages * page_size - 1;

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = (pages + 1) * page_size;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
			return -ENOMEM;
	}

	if (cpu_map__all(cpus))
		return perf_evlist__mmap_per_thread(evlist, prot, mask);

	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}

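/*
 * Build the cpu and thread maps for the requested target: per-task
 * targets (and per-thread targets that don't mmap) get a dummy cpu map,
 * everything else parses target->cpu_list.
 */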
int perf_evlist__create_maps(struct perf_evlist *evlist,
			     struct perf_target *target)
{
	evlist->threads = thread_map__new_str(target->pid, target->tid,
					      target->uid);

	if (evlist->threads == NULL)
		return -1;

	if (perf_target__has_task(target))
		evlist->cpus = cpu_map__dummy_new();
	else if (!perf_target__has_cpu(target) && !target->uses_mmap)
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(target->cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	return -1;
}

void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
}

int perf_evlist__apply_filters(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = evlist->threads->nr;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->filter == NULL)
			continue;

		err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
		if (err)
			break;
	}

	return err;
}

int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = evlist->threads->nr;

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
		if (err)
			break;
	}

	return err;
}

bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_type != pos->attr.sample_type)
			return false;
	}

	return true;
}

u64 perf_evlist__sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.sample_type;
}

u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->attr.sample_id_all)
		goto out;

	sample_type = first->attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;
out:
	return size;
}

bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}

bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.sample_id_all;
}

void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}

int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err, ncpus, nthreads;

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	ncpus = evlist->cpus ? evlist->cpus->nr : 1;
	nthreads = evlist->threads ? evlist->threads->nr : 1;

	list_for_each_entry_reverse(evsel, &evlist->entries, node)
		perf_evsel__close(evsel, ncpus, nthreads);

	errno = -err;
	return err;
}

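/*
 * Fork the workload but keep it corked: the child signals readiness over
 * child_ready_pipe, then blocks reading go_pipe until
 * perf_evlist__start_workload() closes the write end kept in
 * workload.cork_fd, at which point it execs the real command.
 */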
int perf_evlist__prepare_workload(struct perf_evlist *evlist,
				  struct perf_record_opts *opts,
				  const char *argv[])
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		if (opts->pipe_output)
			dup2(2, 1);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Do a dummy execvp to get the PLT entry resolved,
		 * so we avoid the resolver overhead on the real
		 * execvp call.
		 */
		execvp("", (char **)argv);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		if (read(go_pipe[0], &bf, 1) == -1)
			perror("unable to read pipe");

		execvp(argv[0], (char **)argv);

		perror(argv[0]);
		kill(getppid(), SIGUSR1);
		exit(-1);
	}

	if (perf_target__none(&opts->target))
		evlist->threads->map[0] = evlist->workload.pid;

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}

int perf_evlist__start_workload(struct perf_evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		/*
		 * Remove the cork, let it rip!
		 */
		return close(evlist->workload.cork_fd);
	}

	return 0;
}

int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
			      struct perf_sample *sample)
{
	struct perf_evsel *evsel = perf_evlist__first(evlist);
	return perf_evsel__parse_sample(evsel, event, sample);
}

size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *evsel;
	size_t printed = 0;

	list_for_each_entry(evsel, &evlist->entries, node) {
		printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
				   perf_evsel__name(evsel));
	}

	return printed + fprintf(fp, "\n");
}