#define _FILE_OFFSET_BITS 64

#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <sys/utsname.h>

static bool no_buildid_cache = false;

static int trace_event_count;

static u32 header_argc;
static const char **header_argv;

	pr_warning("Event %s will be truncated\n", name);

	nevents = realloc(trace_events, (trace_event_count + 1) * sizeof(*trace_events));

	for (i = 0 ; i < trace_event_count; i++) {
			return trace_events[i].name;

static const char *__perf_magic1 = "PERFFILE";
static const u64 __perf_magic2    = 0x32454c4946524550ULL;
static const u64 __perf_magic2_sw = 0x50455246494c4532ULL;

#define PERF_MAGIC	__perf_magic2
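/*
 * Note on the magic values above: __perf_magic2 is the u64 whose bytes spell
 * "PERFILE2" when laid out in little-endian order, and __perf_magic2_sw is
 * the byte-swapped form a reader on the opposite endianness sees, which is
 * how the header code detects that a file needs swapping. __perf_magic1
 * ("PERFFILE") identifies the legacy format.
 */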
	set_bit(feat, header->adds_features);

	return test_bit(feat, header->adds_features);

#define NAME_ALIGN 64

static int write_padded(int fd, const void *bf, size_t count,
			size_t count_aligned)

		err = do_write(fd, zero_buf, count_aligned - count);

static int do_write_string(int fd, const char *str)

	ret = do_write(fd, &len, sizeof(len));

	return write_padded(fd, str, olen, len);

	sz = read(fd, &len, sizeof(len));

	ret = read(fd, buf, len);

	header_argc = (u32)argc;

	header_argv = calloc(argc, sizeof(char *));

	for (i = 0; i < argc ; i++)
		header_argv[i] = argv[i];
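/*
 * Build-id table records (written by write_buildid() below): each entry is a
 * build_id_event -- a perf_event_header plus the raw build id -- followed by
 * the NUL-terminated DSO path, padded via write_padded() so that header.size
 * stays aligned to NAME_ALIGN (64) bytes.
 */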
#define dsos__for_each_with_build_id(pos, head)	\
	list_for_each_entry(pos, head, node)		\
		if (!pos->has_build_id)			\

static int write_buildid(char *name, size_t name_len, u8 *build_id,

	b.header.size = sizeof(b) + len;

	return write_padded(fd, name, name_len + 1, len);

		err = write_buildid(name, name_len, pos->build_id,

static int machine__write_buildid_table(struct machine *machine, int fd)

	if (!machine__is_host(machine)) {

	err = __dsos__write_buildid_table(&machine->kernel_dsos, machine->pid,

	err = __dsos__write_buildid_table(&machine->user_dsos,
					  machine->pid, umisc, fd);

	int err = machine__write_buildid_table(&session->host_machine, fd);

		err = machine__write_buildid_table(pos, fd);
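/*
 * Build-id cache layout (build_id_cache__add_s() and friends below, roughly):
 * the object is stored as <debugdir>/<object path>/<sbuild_id> -- kallsyms is
 * copied, regular DSOs are hard-linked with a copy as fallback -- and a
 * symlink <debugdir>/.build-id/<first two hex chars>/<remaining hex>,
 * pointing back via "../.." plus the object path, allows lookups by build id
 * alone.
 */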
			  const char *name, bool is_kallsyms, bool is_vdso)

	     *linkname = zalloc(size), *targetname;

	bool slash = is_kallsyms || is_vdso;

		pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n");

		realname = (char *) name;

		realname = realpath(name, NULL);

	if (realname == NULL || filename == NULL || linkname == NULL)

	len = scnprintf(filename, size, "%s%s%s",
			debugdir, slash ? "/" : "",

	snprintf(filename + len, size - len, "/%s", sbuild_id);

	if (access(filename, F_OK)) {

			if (copyfile("/proc/kallsyms", filename))

		} else if (link(realname, filename) && copyfile(name, filename))

	len = scnprintf(linkname, size, "%s/.build-id/%.2s",
			debugdir, sbuild_id);

	snprintf(linkname + len, size - len, "/%s", sbuild_id + 2);

	targetname = filename + strlen(debugdir) - 5;
	memcpy(targetname, "../..", 5);

	if (symlink(targetname, linkname) == 0)
static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size,
				 const char *name, const char *debugdir,
				 bool is_kallsyms, bool is_vdso)

				     is_kallsyms, is_vdso);

	if (filename == NULL || linkname == NULL)

	snprintf(linkname, size, "%s/.build-id/%.2s/%s",
		 debugdir, sbuild_id, sbuild_id + 2);

	if (access(linkname, F_OK))

	if (readlink(linkname, filename, size - 1) < 0)

	if (unlink(linkname))

	snprintf(linkname, size, "%s/.build-id/%.2s/%s",
		 debugdir, sbuild_id, filename);

	if (unlink(linkname))

static int dso__cache_build_id(struct dso *dso, const char *debugdir)

				      is_kallsyms, is_vdso);

static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir)

		if (dso__cache_build_id(pos, debugdir))

static int machine__cache_build_ids(struct machine *machine, const char *debugdir)

	int ret = __dsos__cache_build_ids(&machine->kernel_dsos, debugdir);
	ret |= __dsos__cache_build_ids(&machine->user_dsos, debugdir);

static int perf_session__cache_build_ids(struct perf_session *session)

	if (mkdir(debugdir, 0755) != 0 && errno != EEXIST)

	ret = machine__cache_build_ids(&session->host_machine, debugdir);

		ret |= machine__cache_build_ids(pos, debugdir);

static bool machine__read_build_ids(struct machine *machine, bool with_hits)

static bool perf_session__read_build_ids(struct perf_session *session, bool with_hits)

	bool ret = machine__read_build_ids(&session->host_machine, with_hits);

		ret |= machine__read_build_ids(pos, with_hits);

static int write_build_id(int fd, struct perf_header *h,

	if (!perf_session__read_build_ids(session, true))

	err = dsos__write_buildid_table(h, fd);

		pr_debug("failed to write buildid table\n");

	if (!no_buildid_cache)
		perf_session__cache_build_ids(session);
static int write_hostname(int fd, struct perf_header *h __maybe_unused,

	return do_write_string(fd, uts.nodename);

static int write_osrelease(int fd, struct perf_header *h __maybe_unused,

	return do_write_string(fd, uts.release);

static int write_arch(int fd, struct perf_header *h __maybe_unused,

	return do_write_string(fd, uts.machine);

static int write_version(int fd, struct perf_header *h __maybe_unused,

static int write_cpudesc(int fd, struct perf_header *h __maybe_unused,

#define CPUINFO_PROC NULL

	file = fopen("/proc/cpuinfo", "r");

	while (getline(&buf, &len, file) > 0) {

		if (p && *(p+1) == ' ' && *(p+2))

		while ((*r++ = *q++));

		ret = do_write_string(fd, s);

static int write_nrcpus(int fd, struct perf_header *h __maybe_unused,

	nr = sysconf(_SC_NPROCESSORS_CONF);

	nr = sysconf(_SC_NPROCESSORS_ONLN);

	nra = (u32)(nr & UINT_MAX);

	ret = do_write(fd, &nrc, sizeof(nrc));

	return do_write(fd, &nra, sizeof(nra));

static int write_event_desc(int fd, struct perf_header *h __maybe_unused,

	ret = do_write(fd, &nre, sizeof(nre));

	ret = do_write(fd, &sz, sizeof(sz));

		ret = do_write(fd, &nri, sizeof(nri));

static int write_cmdline(int fd, struct perf_header *h __maybe_unused,

	sprintf(proc, "/proc/%d/exe", getpid());
	ret = readlink(proc, buf, sizeof(buf));

	ret = do_write_string(fd, buf);

	for (i = 0 ; i < header_argc; i++) {
		ret = do_write_string(fd, header_argv[i]);

#define CORE_SIB_FMT \
	"/sys/devices/system/cpu/cpu%d/topology/core_siblings_list"
#define THRD_SIB_FMT \
	"/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list"
static int build_cpu_topo(struct cpu_topo *tp, int cpu)

	char *buf = NULL, *p;

	fp = fopen(filename, "r");

	if (getline(&buf, &len, fp) <= 0)

	for (i = 0; i < tp->core_sib; i++) {

	fp = fopen(filename, "r");

	if (getline(&buf, &len, fp) <= 0)

static void free_cpu_topo(struct cpu_topo *tp)

static struct cpu_topo *build_cpu_topology(void)

	ncpus = sysconf(_SC_NPROCESSORS_CONF);

	nr = (u32)(ncpus & UINT_MAX);

	sz = nr * sizeof(char *);

	addr = calloc(1, sizeof(*tp) + 2 * sz);

	for (i = 0; i < nr; i++) {
		ret = build_cpu_topo(tp, i);

static int write_cpu_topology(int fd, struct perf_header *h __maybe_unused,

	tp = build_cpu_topology();

	for (i = 0; i < tp->core_sib; i++) {

static int write_total_mem(int fd, struct perf_header *h __maybe_unused,

	fp = fopen("/proc/meminfo", "r");

	while (getline(&buf, &len, fp) > 0) {
		ret = strncmp(buf, "MemTotal:", 9);

	ret = do_write(fd, &mem, sizeof(mem));

static int write_topo_node(int fd, int node)

	char *buf = NULL, *p;

	u64 mem_total, mem_free, mem;

	sprintf(str, "/sys/devices/system/node/node%d/meminfo", node);
	fp = fopen(str, "r");

	while (getline(&buf, &len, fp) > 0) {

		if (sscanf(buf, "%*s %*d %s %"PRIu64, field, &mem) != 2)

		if (!strcmp(field, "MemTotal:"))

		if (!strcmp(field, "MemFree:"))

	sprintf(str, "/sys/devices/system/node/node%d/cpulist", node);

	fp = fopen(str, "r");

	if (getline(&buf, &len, fp) <= 0)

	ret = do_write_string(fd, buf);
static int write_numa_topology(int fd, struct perf_header *h __maybe_unused,

	fp = fopen("/sys/devices/system/node/online", "r");

	if (getline(&buf, &len, fp) <= 0)

	nr = (u32)node_map->nr;

	ret = do_write(fd, &nr, sizeof(nr));

	for (i = 0; i < nr; i++) {
		j = (u32)node_map->map[i];

		ret = write_topo_node(fd, i);

static int write_pmu_mappings(int fd, struct perf_header *h __maybe_unused,

	do_write(fd, &pmu_num, sizeof(pmu_num));

		do_write_string(fd, pmu->name);

	if (pwrite(fd, &pmu_num, sizeof(pmu_num), offset) != sizeof(pmu_num)) {

		     size_t sz __maybe_unused)

static int write_cpuid(int fd, struct perf_header *h __maybe_unused,

	ret = get_cpuid(buffer, sizeof(buffer));

	return do_write_string(fd, buffer);

static int write_branch_stack(int fd __maybe_unused,

static void print_hostname(struct perf_header *ph, int fd __maybe_unused,

	fprintf(fp, "# hostname : %s\n", ph->env.hostname);

static void print_osrelease(struct perf_header *ph, int fd __maybe_unused,

	fprintf(fp, "# os release : %s\n", ph->env.os_release);

static void print_arch(struct perf_header *ph, int fd __maybe_unused, FILE *fp)

static void print_cpudesc(struct perf_header *ph, int fd __maybe_unused,

	fprintf(fp, "# cpudesc : %s\n", ph->env.cpu_desc);

static void print_nrcpus(struct perf_header *ph, int fd __maybe_unused,

	fprintf(fp, "# nrcpus online : %u\n", ph->env.nr_cpus_online);
	fprintf(fp, "# nrcpus avail : %u\n", ph->env.nr_cpus_avail);

static void print_version(struct perf_header *ph, int fd __maybe_unused,

	fprintf(fp, "# perf version : %s\n", ph->env.version);

static void print_cmdline(struct perf_header *ph, int fd __maybe_unused,

	nr = ph->env.nr_cmdline;
	str = ph->env.cmdline;

	for (i = 0; i < nr; i++) {

static void print_cpu_topology(struct perf_header *ph, int fd __maybe_unused,

	nr = ph->env.nr_sibling_cores;
	str = ph->env.sibling_cores;

	for (i = 0; i < nr; i++) {
		fprintf(fp, "# sibling cores : %s\n", str);

	nr = ph->env.nr_sibling_threads;
	str = ph->env.sibling_threads;

	for (i = 0; i < nr; i++) {
		fprintf(fp, "# sibling threads : %s\n", str);
	for (evsel = events; evsel->attr.size; evsel++) {

	ret = read(fd, &nre, sizeof(nre));
	if (ret != (ssize_t)sizeof(nre))

		nre = bswap_32(nre);

	ret = read(fd, &sz, sizeof(sz));
	if (ret != (ssize_t)sizeof(sz))

	events = calloc(nre + 1, sizeof(*events));

	msz = sizeof(evsel->attr);

	for (i = 0, evsel = events; i < nre; evsel++, i++) {

		ret = read(fd, buf, sz);

		ret = read(fd, &nr, sizeof(nr));
		if (ret != (ssize_t)sizeof(nr))

		evsel->name = do_read_string(fd, ph);

		id = calloc(nr, sizeof(*id));

		for (j = 0 ; j < nr; j++) {
			ret = read(fd, id, sizeof(*id));
			if (ret != (ssize_t)sizeof(*id))

				*id = bswap_64(*id);

	free_event_desc(events);

static void print_event_desc(struct perf_header *ph, int fd, FILE *fp)

	struct perf_evsel *evsel, *events = read_event_desc(ph, fd);

		fprintf(fp, "# event desc: not available or unable to read\n");

	for (evsel = events; evsel->attr.size; evsel++) {
		fprintf(fp, "# event : name = %s, ", evsel->name);

		fprintf(fp, ", excl_usr = %d, excl_kern = %d",
			evsel->attr.exclude_user,
			evsel->attr.exclude_kernel);

		fprintf(fp, ", excl_host = %d, excl_guest = %d",
			evsel->attr.exclude_host,
			evsel->attr.exclude_guest);

		fprintf(fp, ", precise_ip = %d", evsel->attr.precise_ip);

		for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) {

	free_event_desc(events);
static void print_total_mem(struct perf_header *ph, int fd __maybe_unused,

	fprintf(fp, "# total memory : %Lu kB\n", ph->env.total_mem);

static void print_numa_topology(struct perf_header *ph, int fd __maybe_unused,

	nr = ph->env.nr_numa_nodes;
	str = ph->env.numa_nodes;

	for (i = 0; i < nr; i++) {

		c = strtoul(str, &tmp, 0);

		mem_total = strtoull(str, &tmp, 0);

			    " free = %"PRIu64" kB\n",
			c, mem_total, mem_free);

		fprintf(fp, "# node%u cpu list : %s\n", c, str);

	fprintf(fp, "# numa topology : not available\n");

static void print_cpuid(struct perf_header *ph, int fd __maybe_unused, FILE *fp)

	fprintf(fp, "# cpuid : %s\n", ph->env.cpuid);

static void print_branch_stack(struct perf_header *ph __maybe_unused,
			       int fd __maybe_unused, FILE *fp)

	fprintf(fp, "# contains samples with branch stack\n");

static void print_pmu_mappings(struct perf_header *ph, int fd __maybe_unused,

	const char *delimiter = "# pmu mappings: ";

	pmu_num = ph->env.nr_pmu_mappings;

		fprintf(fp, "# pmu mappings: not available\n");

	str = ph->env.pmu_mappings;

		type = strtoul(str, &tmp, 0);

	fprintf(fp, "# pmu mappings: unable to read\n");

	machine = perf_session__findnew_machine(session, bev->pid);

	if (filename[0] == '[')

	pr_debug("build id event received for %s: %s\n",

static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,

	while (offset < limit) {

		if (read(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))

		len = old_bev.header.size - sizeof(old_bev);
		if (read(input, filename, len) != len)

		bev.header = old_bev.header;

		__event_process_build_id(&bev, filename, session);

		offset += bev.header.size;

static int perf_header__read_build_ids(struct perf_header *header,
				       int input, u64 offset, u64 size)

	while (offset < limit) {

		if (read(input, &bev, sizeof(bev)) != sizeof(bev))

		len = bev.header.size - sizeof(bev);
		if (read(input, filename, len) != len)

		if (memcmp(filename, "nel.kallsyms]", 13) == 0) {

			return perf_header__read_build_ids_abi_quirk(header, input, offset, size);

		__event_process_build_id(&bev, filename, session);

		offset += bev.header.size;
			      void *data __maybe_unused)

	if (perf_header__read_build_ids(ph, fd, section->offset, section->size))
		pr_debug("Failed to read buildids, continuing...\n");

			    void *data __maybe_unused)

	ph->env.hostname = do_read_string(fd, ph);

			     void *data __maybe_unused)

	ph->env.os_release = do_read_string(fd, ph);
	return ph->env.os_release ? 0 : -ENOMEM;

			   void *data __maybe_unused)

	ph->env.version = do_read_string(fd, ph);

			void *data __maybe_unused)

	ph->env.arch = do_read_string(fd, ph);

			  void *data __maybe_unused)

	ret = read(fd, &nr, sizeof(nr));
	if (ret != sizeof(nr))

	ph->env.nr_cpus_online = nr;

	ret = read(fd, &nr, sizeof(nr));
	if (ret != sizeof(nr))

	ph->env.nr_cpus_avail = nr;

			   void *data __maybe_unused)

	ph->env.cpu_desc = do_read_string(fd, ph);

			 void *data __maybe_unused)

	ph->env.cpuid = do_read_string(fd, ph);

			     void *data __maybe_unused)

	ret = read(fd, &mem, sizeof(mem));
	if (ret != sizeof(mem))

		mem = bswap_64(mem);

		if (evsel->idx == idx)

perf_evlist__set_event_name(struct perf_evlist *evlist,

	evsel = perf_evlist__find_by_index(evlist, event->idx);

			      void *data __maybe_unused)

	struct perf_evsel *evsel, *events = read_event_desc(header, fd);

	for (evsel = events; evsel->attr.size; evsel++)
		perf_evlist__set_event_name(session->evlist, evsel);

	free_event_desc(events);

			   void *data __maybe_unused)

	ret = read(fd, &nr, sizeof(nr));
	if (ret != sizeof(nr))

	ph->env.nr_cmdline = nr;

	for (i = 0; i < nr; i++) {
		str = do_read_string(fd, ph);
static int process_cpu_topology(struct perf_file_section *section __maybe_unused,
				void *data __maybe_unused)

	ret = read(fd, &nr, sizeof(nr));
	if (ret != sizeof(nr))

	ph->env.nr_sibling_cores = nr;

	for (i = 0; i < nr; i++) {
		str = do_read_string(fd, ph);

	ret = read(fd, &nr, sizeof(nr));
	if (ret != sizeof(nr))

	ph->env.nr_sibling_threads = nr;

	for (i = 0; i < nr; i++) {
		str = do_read_string(fd, ph);

static int process_numa_topology(struct perf_file_section *section __maybe_unused,
				 void *data __maybe_unused)

	ret = read(fd, &nr, sizeof(nr));
	if (ret != sizeof(nr))

	ph->env.nr_numa_nodes = nr;

	for (i = 0; i < nr; i++) {

		ret = read(fd, &node, sizeof(node));
		if (ret != sizeof(node))

		ret = read(fd, &mem_total, sizeof(u64));
		if (ret != sizeof(u64))

		ret = read(fd, &mem_free, sizeof(u64));
		if (ret != sizeof(u64))

			node = bswap_32(node);
			mem_total = bswap_64(mem_total);
			mem_free = bswap_64(mem_free);

				node, mem_total, mem_free);

		str = do_read_string(fd, ph);

static int process_pmu_mappings(struct perf_file_section *section __maybe_unused,
				void *data __maybe_unused)

	ret = read(fd, &pmu_num, sizeof(pmu_num));
	if (ret != sizeof(pmu_num))

		pmu_num = bswap_32(pmu_num);

		pr_debug("pmu mappings not available\n");

	ph->env.nr_pmu_mappings = pmu_num;

		if (read(fd, &type, sizeof(type)) != sizeof(type))

			type = bswap_32(type);

		name = do_read_string(fd, ph);
#define FEAT_OPA(n, func) \
	[n] = { .name = #n, .write = write_##func, .print = print_##func }
#define FEAT_OPP(n, func) \
	[n] = { .name = #n, .write = write_##func, .print = print_##func, \
		.process = process_##func }
#define FEAT_OPF(n, func) \
	[n] = { .name = #n, .write = write_##func, .print = print_##func, \
		.process = process_##func, .full_only = true }

#define print_tracing_data	NULL
#define print_build_id		NULL
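/*
 * The FEAT_OP* macros above populate a feat_ops[] table indexed by feature
 * bit: every entry carries .write and .print callbacks, FEAT_OPP/FEAT_OPF
 * entries also get a .process callback for the read side, and .full_only
 * marks features that are only printed when the full header is requested;
 * otherwise just a "use -I to display" hint is emitted, as seen below.
 */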
2005 "%d, continuing...\n", section->
offset, feat);
2012 if (!feat_ops[feat].print)
2015 if (!feat_ops[feat].full_only || hd->
full)
2018 fprintf(hd->
fp,
"# %s info available, use -I to display\n",
2019 feat_ops[feat].
name);
2028 int fd = session->
fd;
2033 perf_file_section__fprintf_info);
2037 static int do_write_feat(
int fd,
struct perf_header *h,
int type,
2045 if (!feat_ops[type].
write)
2048 (*p)->offset = lseek(fd, 0,
SEEK_CUR);
2050 err = feat_ops[
type].
write(fd, h, evlist);
2052 pr_debug(
"failed to write feature %d\n", type);
2059 (*p)->size = lseek(fd, 0,
SEEK_CUR) - (*p)->offset;
2065 static int perf_header__adds_write(
struct perf_header *header,
2079 feat_sec = p = calloc(
sizeof(*feat_sec), nr_sections);
2080 if (feat_sec ==
NULL)
2083 sec_size =
sizeof(*feat_sec) * nr_sections;
2086 lseek(fd, sec_start + sec_size,
SEEK_SET);
2089 if (do_write_feat(fd, header, feat, &p, evlist))
2098 err =
do_write(fd, feat_sec, sec_size);
2100 pr_debug(
"failed to write feature section\n");
2112 .size =
sizeof(f_header),
2115 err =
do_write(fd, &f_header,
sizeof(f_header));
2117 pr_debug(
"failed to write perf pipe header\n");
2126 int fd,
bool at_exit)
2134 lseek(fd,
sizeof(f_header),
SEEK_SET);
2136 if (session->
evlist != evlist)
2137 pair = perf_evlist__first(session->
evlist);
2144 pr_debug(
"failed to write perf header\n");
2147 if (session->
evlist != evlist) {
2152 pair = perf_evsel__next(pair);
2163 .size = evsel->
ids *
sizeof(
u64),
2166 err =
do_write(fd, &f_attr,
sizeof(f_attr));
2168 pr_debug(
"failed to write perf header attribute\n");
2178 pr_debug(
"failed to write perf header events\n");
2186 err = perf_header__adds_write(header, evlist, fd);
2193 .size =
sizeof(f_header),
2194 .attr_size =
sizeof(f_attr),
2209 memcpy(&f_header.adds_features, &header->adds_features,
sizeof(header->adds_features));
2212 err =
do_write(fd, &f_header,
sizeof(f_header));
2214 pr_debug(
"failed to write perf header\n");
2223 static int perf_header__getbuffer64(
struct perf_header *header,
2224 int fd,
void *buf,
size_t size)
2226 if (
readn(fd, buf, size) <= 0)
2239 int feat,
int fd,
void *data))
2251 feat_sec = sec = calloc(
sizeof(*feat_sec), nr_sections);
2255 sec_size =
sizeof(*feat_sec) * nr_sections;
2259 err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
2264 err = process(sec++, header, feat, fd, data);
static const int attr_file_abi_sizes[] = {

	for (i = 0 ; attr_file_abi_sizes[i]; i++) {
		ref_size = attr_file_abi_sizes[i]

		if (hdr_sz != ref_size) {
			attr_size = bswap_64(hdr_sz);
			if (attr_size != ref_size)

		pr_debug("ABI%d perf.data file detected, need_swap=%d\n",

#define PERF_PIPE_HDR_VER0	16

static const size_t attr_pipe_abi_sizes[] = {

	for (i = 0 ; attr_pipe_abi_sizes[i]; i++) {
		if (hdr_sz != attr_pipe_abi_sizes[i]) {
			attr_size = bswap_64(hdr_sz);
			if (attr_size != hdr_sz)

		pr_debug("Pipe ABI%d perf.data file detected\n", i);
	ret = memcmp(&magic, __perf_magic1, sizeof(magic));

		pr_debug("legacy perf.data format\n");

			return try_all_pipe_abis(hdr_sz, ph);

		return try_all_file_abis(hdr_sz, ph);

	if (magic == __perf_magic2)

	if (magic != __perf_magic2_sw)

	ret = readn(fd, header, sizeof(*header));

	if (check_magic_endian(header->magic,

		pr_debug("magic/endian check failed\n");

	if (header->size != sizeof(*header)) {

	memcpy(&ph->adds_features, &header->adds_features,
	       sizeof(ph->adds_features));

				       int feat, int fd, void *data)

			 "%d, continuing...\n", section->offset, feat);

		pr_debug("unknown feature %d, continuing...\n", feat);

	if (!feat_ops[feat].process)

	return feat_ops[feat].process(section, ph, fd, data);

	ret = readn(fd, header, sizeof(*header));

	if (check_magic_endian(header->magic, header->size, true, ph) < 0) {

		header->size = bswap_64(header->size);

	if (repipe && do_write(STDOUT_FILENO, header, sizeof(*header)) < 0)

static int perf_header__read_pipe(struct perf_session *session, int fd)

	if (perf_file_header__read_pipe(&f_header, header, fd,

		pr_debug("incompatible file format\n");
static int read_attr(int fd, struct perf_header *ph,

	size_t our_sz = sizeof(f_attr->attr);

	memset(f_attr, 0, sizeof(*f_attr));

		pr_debug("cannot read %d bytes of header attr\n",

	} else if (sz > our_sz) {
		pr_debug("file uses a more recent and unsupported ABI"
			 " (%zu bytes extra)\n", sz - our_sz);

	ret = readn(fd, ptr, left);

	ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));

	return ret <= 0 ? -1 : 0;

static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel,

	evsel->name = strdup(bf);

static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist,
						  struct pevent *pevent)

		    perf_evsel__prepare_tracepoint_event(pos, pevent))

	int nr_attrs, nr_ids, i, j;

		return perf_header__read_pipe(session, fd);

	for (i = 0; i < nr_attrs; i++) {

		if (read_attr(fd, header, &f_attr) < 0)

			goto out_delete_evlist;

		nr_ids = f_attr.ids.size / sizeof(u64);

			goto out_delete_evlist;

		for (j = 0; j < nr_ids; j++) {
			if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))

	if (trace_events == NULL)

	if (perf_header__getbuffer64(header, fd, trace_events,

				    perf_file_section__process);

	if (perf_evlist__prepare_tracepoint_events(session->evlist,

		goto out_delete_evlist;

	size += ids * sizeof(u64);

	ev->attr.attr = *attr;

	memcpy(ev->attr.id, id, ids * sizeof(u64));

	ev->attr.header.size = (u16)size;

	if (ev->attr.header.size == size)
		err = process(tool, ev, NULL, NULL);

						  evsel->id, process);

			pr_debug("failed to create perf header attribute\n");

	if (evlist == NULL) {

	ids = event->header.size;
	ids -= (void *)&event->attr.id - (void *)event;
	n_ids = ids / sizeof(u64);

	for (i = 0; i < n_ids; i++) {

				   struct machine *machine)

	memset(&ev, 0, sizeof(ev));

	err = process(tool, &ev, NULL, machine);

				    struct machine *machine)

	for (i = 0; i < trace_event_count; i++) {
		type = &trace_events[i];

							  type->name, process,

			pr_debug("failed to create perf header event type\n");

	int err __maybe_unused = 0;

	memset(&ev, 0, sizeof(ev));

	return aligned_size;

		if (read(session->fd, buf, padding) < 0)
			die("reading input file");

		int retw = write(STDOUT_FILENO, buf, padding);
		if (retw <= 0 || retw != padding)
			die("repiping tracing data padding");

	if (size_read + padding != size)
		die("tracing data size mismatch");

	perf_evlist__prepare_tracepoint_events(session->evlist,

				       struct dso *pos, u16 misc,
				       struct machine *machine)

	memset(&ev, 0, sizeof(ev));

	err = process(tool, &ev, NULL, machine);

	__event_process_build_id(&event->build_id,

	no_buildid_cache = true;