23 #include <linux/bitops.h>
24 #include <linux/hash.h>
/*
 * Accessors into an evsel's per-(cpu, thread) xyarray tables:
 *  - FD(e, x, y):  lvalue of the perf event file descriptor stored in
 *                  e->fd at (cpu x, thread y) — usable on both sides of
 *                  an assignment (it dereferences the int slot).
 *  - SID(e, x, y): pointer to the sample-id slot in e->sample_id at
 *                  (cpu x, thread y); presumably a struct perf_sample_id
 *                  entry — confirm against the evsel definition.
 * NOTE(review): neither macro checks that x/y are in range; callers are
 * expected to iterate within evlist->cpus->nr / evlist->threads->nr.
 */
26 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
27 #define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
36 INIT_LIST_HEAD(&evlist->
entries);
37 perf_evlist__set_maps(evlist, cpus, threads);
57 if (evlist->
cpus->map[0] < 0)
60 first = perf_evlist__first(evlist);
70 static void perf_evlist__purge(
struct perf_evlist *evlist)
75 list_del_init(&pos->
node);
92 perf_evlist__purge(evlist);
107 list_splice_tail(list, &evlist->
entries);
145 evsel->
name = strdup(
"cycles");
157 static int perf_evlist__add_attrs(
struct perf_evlist *evlist,
164 for (i = 0; i < nr_attrs; i++) {
167 goto out_delete_partial_list;
175 out_delete_partial_list:
186 for (i = 0; i < nr_attrs; i++)
189 return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
199 (
int)evsel->
attr.config ==
id)
225 for (cpu = 0; cpu < evlist->
cpus->nr; cpu++) {
227 for (thread = 0; thread < evlist->
threads->nr; thread++)
228 ioctl(
FD(pos, cpu, thread),
239 for (cpu = 0; cpu < cpu_map__nr(evlist->
cpus); cpu++) {
241 for (thread = 0; thread < evlist->
threads->nr; thread++)
242 ioctl(
FD(pos, cpu, thread),
248 static int perf_evlist__alloc_pollfd(
struct perf_evlist *evlist)
263 static void perf_evlist__id_hash(
struct perf_evlist *evlist,
273 hlist_add_head(&sid->
node, &evlist->
heads[hash]);
279 perf_evlist__id_hash(evlist, evsel, cpu, thread,
id);
283 static int perf_evlist__id_add_fd(
struct perf_evlist *evlist,
287 u64 read_data[4] = { 0, };
291 read(fd, &read_data,
sizeof(read_data)) == -1)
311 return perf_evlist__first(evlist);
321 return perf_evlist__first(evlist);
329 unsigned int page_size = sysconf(_SC_PAGE_SIZE);
331 unsigned int head = perf_mmap__read_head(md);
332 unsigned int old = md->
prev;
345 int diff = head - old;
346 if (diff > md->
mask / 2 || diff < 0) {
347 fprintf(stderr,
"WARNING: failed to keep up with mmap data.\n");
367 unsigned int offset = old;
372 cpy =
min(md->
mask + 1 - (offset & md->
mask), len);
388 perf_mmap__write_tail(md, old);
397 for (i = 0; i < evlist->
nr_mmaps; i++) {
408 static int perf_evlist__alloc_mmap(
struct perf_evlist *evlist)
411 if (cpu_map__all(evlist->
cpus))
417 static int __perf_evlist__mmap(
struct perf_evlist *evlist,
418 int idx,
int prot,
int mask,
int fd)
424 if (evlist->
mmap[idx].base == MAP_FAILED) {
433 static int perf_evlist__mmap_per_cpu(
struct perf_evlist *evlist,
int prot,
int mask)
438 for (cpu = 0; cpu < evlist->
cpus->nr; cpu++) {
441 for (thread = 0; thread < evlist->
threads->nr; thread++) {
443 int fd =
FD(evsel, cpu, thread);
447 if (__perf_evlist__mmap(evlist, cpu,
448 prot, mask, output) < 0)
456 perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
465 for (cpu = 0; cpu < evlist->
cpus->nr; cpu++) {
466 if (evlist->
mmap[cpu].base !=
NULL) {
474 static int perf_evlist__mmap_per_thread(
struct perf_evlist *evlist,
int prot,
int mask)
479 for (thread = 0; thread < evlist->
threads->nr; thread++) {
483 int fd =
FD(evsel, 0, thread);
487 if (__perf_evlist__mmap(evlist, thread,
488 prot, mask, output) < 0)
496 perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
504 for (thread = 0; thread < evlist->
threads->nr; thread++) {
505 if (evlist->
mmap[thread].base !=
NULL) {
531 unsigned int page_size = sysconf(_SC_PAGE_SIZE);
539 pages = (512 * 1024) / page_size;
543 mask = pages * page_size - 1;
545 if (evlist->
mmap ==
NULL && perf_evlist__alloc_mmap(evlist) < 0)
548 if (evlist->
pollfd ==
NULL && perf_evlist__alloc_pollfd(evlist) < 0)
552 evlist->
mmap_len = (pages + 1) * page_size;
561 if (cpu_map__all(cpus))
562 return perf_evlist__mmap_per_thread(evlist, prot, mask);
564 return perf_evlist__mmap_per_cpu(evlist, prot, mask);
576 if (perf_target__has_task(target))
578 else if (!perf_target__has_cpu(target) && !target->
uses_mmap)
584 goto out_delete_threads;
605 const int ncpus = cpu_map__nr(evlist->
cpus),
606 nthreads = evlist->
threads->nr;
624 const int ncpus = cpu_map__nr(evlist->
cpus),
625 nthreads = evlist->
threads->nr;
641 if (first->
attr.sample_type !=
pos->attr.sample_type)
651 return first->
attr.sample_type;
661 if (!first->
attr.sample_id_all)
664 sample_type = first->
attr.sample_type;
667 size +=
sizeof(data->
tid) * 2;
670 size +=
sizeof(data->
time);
673 size +=
sizeof(data->
id);
679 size +=
sizeof(data->
cpu) * 2;
689 if (first->
attr.sample_id_all !=
pos->attr.sample_id_all)
699 return first->
attr.sample_id_all;
721 ncpus = evlist->
cpus ? evlist->
cpus->nr : 1;
735 int child_ready_pipe[2], go_pipe[2];
738 if (
pipe(child_ready_pipe) < 0) {
739 perror(
"failed to create 'ready' pipe");
743 if (
pipe(go_pipe) < 0) {
744 perror(
"failed to create 'go' pipe");
745 goto out_close_ready_pipe;
750 perror(
"failed to fork");
751 goto out_close_pipes;
758 close(child_ready_pipe[0]);
767 execvp(
"", (
char **)argv);
772 close(child_ready_pipe[1]);
777 if (
read(go_pipe[0], &bf, 1) == -1)
778 perror(
"unable to read pipe");
780 execvp(argv[0], (
char **)argv);
787 if (perf_target__none(&opts->
target))
790 close(child_ready_pipe[1]);
795 if (
read(child_ready_pipe[0], &bf, 1) == -1) {
796 perror(
"unable to read pipe");
797 goto out_close_pipes;
800 evlist->
workload.cork_fd = go_pipe[1];
801 close(child_ready_pipe[0]);
807 out_close_ready_pipe:
808 close(child_ready_pipe[0]);
809 close(child_ready_pipe[1]);
819 return close(evlist->
workload.cork_fd);
828 struct perf_evsel *evsel = perf_evlist__first(evlist);
838 printed +=
fprintf(fp,
"%s%s", evsel->
idx ?
", " :
"",
842 return printed +
fprintf(fp,
"\n");;