/* PCI bus number -> physical package (socket) id; -1 means "not mapped yet". */
static int pcibus_to_physid[256] = { [0 ... 255] = -1, };
46 rdmsrl(event->hw.event_base, count);
69 (!uncore_box_is_fake(box) && reg1->
alloc))
84 if (!uncore_box_is_fake(box))
89 return &constraint_empty;
105 if (uncore_box_is_fake(box) || !reg1->
alloc)
120 int box_ctl = uncore_pci_box_ctl(box);
123 if (!pci_read_config_dword(pdev, box_ctl, &config)) {
125 pci_write_config_dword(pdev, box_ctl, config);
132 int box_ctl = uncore_pci_box_ctl(box);
135 if (!pci_read_config_dword(pdev, box_ctl, &config)) {
137 pci_write_config_dword(pdev, box_ctl, config);
154 pci_write_config_dword(pdev, hwc->config_base, hwc->config);
163 pci_read_config_dword(pdev, hwc->event_base, (
u32 *)&count);
164 pci_read_config_dword(pdev, hwc->event_base + 4, (
u32 *)&count + 1);
181 msr = uncore_msr_box_ctl(box);
194 msr = uncore_msr_box_ctl(box);
218 wrmsrl(hwc->config_base, hwc->config);
223 unsigned msr = uncore_msr_box_ctl(box);
234 if (box->
pmu->type == &snbep_uncore_cbox) {
237 reg1->
config =
event->attr.config1 &
240 if (box->
pmu->type == &snbep_uncore_pcu) {
252 static struct attribute *snbep_uncore_formats_attr[] = {
253 &format_attr_event.attr,
254 &format_attr_umask.attr,
255 &format_attr_edge.attr,
256 &format_attr_inv.attr,
257 &format_attr_thresh8.attr,
261 static struct attribute *snbep_uncore_ubox_formats_attr[] = {
262 &format_attr_event.attr,
263 &format_attr_umask.attr,
264 &format_attr_edge.attr,
265 &format_attr_inv.attr,
266 &format_attr_thresh5.attr,
270 static struct attribute *snbep_uncore_cbox_formats_attr[] = {
271 &format_attr_event.attr,
272 &format_attr_umask.attr,
273 &format_attr_edge.attr,
274 &format_attr_tid_en.attr,
275 &format_attr_inv.attr,
276 &format_attr_thresh8.attr,
277 &format_attr_filter_tid.attr,
278 &format_attr_filter_nid.attr,
279 &format_attr_filter_state.attr,
280 &format_attr_filter_opc.attr,
284 static struct attribute *snbep_uncore_pcu_formats_attr[] = {
285 &format_attr_event.attr,
286 &format_attr_occ_sel.attr,
287 &format_attr_edge.attr,
288 &format_attr_inv.attr,
289 &format_attr_thresh5.attr,
290 &format_attr_occ_invert.attr,
291 &format_attr_occ_edge.attr,
292 &format_attr_filter_band0.attr,
293 &format_attr_filter_band1.attr,
294 &format_attr_filter_band2.attr,
295 &format_attr_filter_band3.attr,
299 static struct attribute *snbep_uncore_qpi_formats_attr[] = {
300 &format_attr_event_ext.attr,
301 &format_attr_umask.attr,
302 &format_attr_edge.attr,
303 &format_attr_inv.attr,
304 &format_attr_thresh8.attr,
325 .attrs = snbep_uncore_formats_attr,
330 .attrs = snbep_uncore_ubox_formats_attr,
335 .attrs = snbep_uncore_cbox_formats_attr,
340 .attrs = snbep_uncore_pcu_formats_attr,
345 .attrs = snbep_uncore_qpi_formats_attr,
349 .init_box = snbep_uncore_msr_init_box,
350 .disable_box = snbep_uncore_msr_disable_box,
351 .enable_box = snbep_uncore_msr_enable_box,
352 .disable_event = snbep_uncore_msr_disable_event,
353 .enable_event = snbep_uncore_msr_enable_event,
354 .read_counter = uncore_msr_read_counter,
355 .get_constraint = uncore_get_constraint,
356 .put_constraint = uncore_put_constraint,
357 .hw_config = snbep_uncore_hw_config,
361 .init_box = snbep_uncore_pci_init_box,
362 .disable_box = snbep_uncore_pci_disable_box,
363 .enable_box = snbep_uncore_pci_enable_box,
364 .disable_event = snbep_uncore_pci_disable_event,
365 .enable_event = snbep_uncore_pci_enable_event,
366 .read_counter = snbep_uncore_pci_read_counter,
439 .fixed_ctr_bits = 48,
445 .ops = &snbep_uncore_msr_ops,
446 .format_group = &snbep_uncore_ubox_format_group,
459 .num_shared_regs = 1,
460 .constraints = snbep_uncore_cbox_constraints,
461 .ops = &snbep_uncore_msr_ops,
462 .format_group = &snbep_uncore_cbox_format_group,
474 .num_shared_regs = 1,
475 .ops = &snbep_uncore_msr_ops,
476 .format_group = &snbep_uncore_pcu_format_group,
/*
 * Common field initializer for SNB-EP uncore PMU types programmed through
 * PCI config space: counter/control register offsets, the raw event mask,
 * box-level control register, and the shared PCI ops + format group.
 */
#define SNBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &snbep_uncore_pci_ops,		\
	.format_group	= &snbep_uncore_format_group
507 .fixed_ctr_bits = 48,
510 .event_descs = snbep_uncore_imc_events,
523 .ops = &snbep_uncore_pci_ops,
524 .event_descs = snbep_uncore_qpi_events,
525 .format_group = &snbep_uncore_qpi_format_group,
534 .constraints = snbep_uncore_r2pcie_constraints,
543 .constraints = snbep_uncore_r3qpi_constraints,
551 &snbep_uncore_r2pcie,
559 .driver_data = (
unsigned long)&snbep_uncore_ha,
563 .driver_data = (
unsigned long)&snbep_uncore_imc,
567 .driver_data = (
unsigned long)&snbep_uncore_imc,
571 .driver_data = (
unsigned long)&snbep_uncore_imc,
575 .driver_data = (
unsigned long)&snbep_uncore_imc,
579 .driver_data = (
unsigned long)&snbep_uncore_qpi,
583 .driver_data = (
unsigned long)&snbep_uncore_qpi,
587 .driver_data = (
unsigned long)&snbep_uncore_r2pcie,
591 .driver_data = (
unsigned long)&snbep_uncore_r3qpi,
595 .driver_data = (
unsigned long)&snbep_uncore_r3qpi,
600 static struct pci_driver snbep_uncore_pci_driver = {
601 .name =
"snbep_uncore",
602 .id_table = snbep_uncore_pci_ids,
608 static int snbep_pci2phy_map_init(
void)
622 bus = ubox_dev->
bus->number;
624 err = pci_read_config_dword(ubox_dev, 0x40, &config);
629 err = pci_read_config_dword(ubox_dev, 0x54, &config);
636 for (i = 0; i < 8; i++) {
637 if (nodeid == ((config >> (3 * i)) & 0x7)) {
638 pcibus_to_physid[
bus] =
i;
647 return err ? pcibios_err_to_errno(err) : 0;
664 wrmsrl(event->hw.config_base, 0);
669 if (box->
pmu->pmu_idx == 0) {
680 static struct attribute *snb_uncore_formats_attr[] = {
681 &format_attr_event.attr,
682 &format_attr_umask.attr,
683 &format_attr_edge.attr,
684 &format_attr_inv.attr,
685 &format_attr_cmask5.attr,
691 .attrs = snb_uncore_formats_attr,
695 .init_box = snb_uncore_msr_init_box,
696 .disable_event = snb_uncore_msr_disable_event,
697 .enable_event = snb_uncore_msr_enable_event,
698 .read_counter = uncore_msr_read_counter,
712 .fixed_ctr_bits = 48,
720 .constraints = snb_uncore_cbox_constraints,
721 .ops = &snb_uncore_msr_ops,
722 .format_group = &snb_uncore_format_group,
723 .event_descs = snb_uncore_events,
753 static struct attribute *nhm_uncore_formats_attr[] = {
754 &format_attr_event.attr,
755 &format_attr_umask.attr,
756 &format_attr_edge.attr,
757 &format_attr_inv.attr,
758 &format_attr_cmask8.attr,
764 .attrs = nhm_uncore_formats_attr,
781 .disable_box = nhm_uncore_msr_disable_box,
782 .enable_box = nhm_uncore_msr_enable_box,
783 .disable_event = snb_uncore_msr_disable_event,
784 .enable_event = nhm_uncore_msr_enable_event,
785 .read_counter = uncore_msr_read_counter,
793 .fixed_ctr_bits = 48,
799 .event_descs = nhm_uncore_events,
800 .ops = &nhm_uncore_msr_ops,
801 .format_group = &nhm_uncore_format_group,
/*
 * Extract the i-th n-bit-wide field of x: shift right by i*n, mask to the
 * low n bits, and cast back to x's type (GNU typeof).  The mask is built
 * with 1ULL << n, so n must be < 64.
 */
#define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
				((1ULL << (n)) - 1)))
826 unsigned msr = uncore_msr_box_ctl(box);
831 config &= ~((1ULL << uncore_num_counters(box)) - 1);
833 if (uncore_msr_fixed_ctl(box))
841 unsigned msr = uncore_msr_box_ctl(box);
846 config |= (1ULL << uncore_num_counters(box)) - 1;
848 if (uncore_msr_fixed_ctl(box))
856 wrmsrl(event->hw.config_base, 0);
/*
 * Shared MSR-based uncore ops for NHM-EX boxes.  .enable_event is
 * deliberately omitted so each box type can plug in its own variant
 * (see the per-box ops structures below).
 */
#define NHMEX_UNCORE_OPS_COMMON_INIT()				\
	.init_box	= nhmex_uncore_msr_init_box,		\
	.disable_box	= nhmex_uncore_msr_disable_box,		\
	.enable_box	= nhmex_uncore_msr_enable_box,		\
	.disable_event	= nhmex_uncore_msr_disable_event,	\
	.read_counter	= uncore_msr_read_counter
880 .enable_event = nhmex_uncore_msr_enable_event,
883 static struct attribute *nhmex_uncore_ubox_formats_attr[] = {
884 &format_attr_event.attr,
885 &format_attr_edge.attr,
891 .attrs = nhmex_uncore_ubox_formats_attr,
903 .ops = &nhmex_uncore_ops,
904 .format_group = &nhmex_uncore_ubox_format_group
907 static struct attribute *nhmex_uncore_cbox_formats_attr[] = {
908 &format_attr_event.attr,
909 &format_attr_umask.attr,
910 &format_attr_edge.attr,
911 &format_attr_inv.attr,
912 &format_attr_thresh8.attr,
918 .attrs = nhmex_uncore_cbox_formats_attr,
922 static unsigned nhmex_cbox_msr_offsets[] = {
923 0x0, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x240, 0x2c0,
935 .msr_offsets = nhmex_cbox_msr_offsets,
937 .ops = &nhmex_uncore_ops,
938 .format_group = &nhmex_uncore_cbox_format_group
958 .event_descs = nhmex_uncore_wbox_events,
959 .ops = &nhmex_uncore_ops,
960 .format_group = &nhmex_uncore_cbox_format_group
976 if ((ctr == 0 && ev_sel > 0x3) || (ctr == 1 && ev_sel > 0x6) ||
977 (ctr == 2 && ev_sel != 0x4) || ctr == 3)
980 if (box->
pmu->pmu_idx == 0)
985 reg1->
config =
event->attr.config1;
986 reg2->
config =
event->attr.config2;
1016 static struct attribute *nhmex_uncore_bbox_formats_attr[] = {
1017 &format_attr_event5.attr,
1018 &format_attr_counter.attr,
1019 &format_attr_match.attr,
1020 &format_attr_mask.attr,
1026 .attrs = nhmex_uncore_bbox_formats_attr,
1031 .enable_event = nhmex_bbox_msr_enable_event,
1032 .hw_config = nhmex_bbox_hw_config,
1033 .get_constraint = uncore_get_constraint,
1034 .put_constraint = uncore_put_constraint,
1041 .perf_ctr_bits = 48,
1048 .num_shared_regs = 1,
1049 .constraints = nhmex_uncore_bbox_constraints,
1050 .ops = &nhmex_uncore_bbox_ops,
1051 .format_group = &nhmex_uncore_bbox_format_group
1065 if (box->
pmu->pmu_idx == 0)
1070 reg1->
config =
event->attr.config1;
1071 reg2->
config =
event->attr.config2;
1082 wrmsrl(reg1->
reg, 0);
1090 static struct attribute *nhmex_uncore_sbox_formats_attr[] = {
1091 &format_attr_event.attr,
1092 &format_attr_umask.attr,
1093 &format_attr_edge.attr,
1094 &format_attr_inv.attr,
1095 &format_attr_thresh8.attr,
1096 &format_attr_match.attr,
1097 &format_attr_mask.attr,
1103 .attrs = nhmex_uncore_sbox_formats_attr,
1108 .enable_event = nhmex_sbox_msr_enable_event,
1109 .hw_config = nhmex_sbox_hw_config,
1110 .get_constraint = uncore_get_constraint,
1111 .put_constraint = uncore_put_constraint,
1118 .perf_ctr_bits = 48,
1125 .num_shared_regs = 1,
1126 .ops = &nhmex_uncore_sbox_ops,
1127 .format_group = &nhmex_uncore_sbox_format_group
1141 static struct extra_reg nhmex_uncore_mbox_extra_regs[] = {
1169 unsigned long flags;
1255 if (new_idx > orig_idx) {
1256 idx = new_idx - orig_idx;
1259 idx = orig_idx - new_idx;
1271 if (new_idx > orig_idx)
1276 reg1->
idx = ~0xff | new_idx;
1286 int i, idx[2],
alloc = 0;
1292 for (i = 0; i < 2; i++) {
1293 if (!uncore_box_is_fake(box) && (reg1->
alloc & (0x1 << i)))
1299 if (!nhmex_mbox_get_shared_reg(box, idx[i],
1302 alloc |= (0x1 <<
i);
1307 (uncore_box_is_fake(box) || !reg2->
alloc) &&
1308 !nhmex_mbox_get_shared_reg(box, reg2->
idx, reg2->
config))
1317 if (!uncore_box_is_fake(box)) {
1326 if (idx[0] != 0xff && !(alloc & 0x1) &&
1336 idx[0] = (idx[0] + 1) % 4;
1345 nhmex_mbox_put_shared_reg(box, idx[0]);
1347 nhmex_mbox_put_shared_reg(box, idx[1]);
1348 return &constraint_empty;
1356 if (uncore_box_is_fake(box))
1359 if (reg1->
alloc & 0x1)
1361 if (reg1->
alloc & 0x2)
1366 nhmex_mbox_put_shared_reg(box, reg2->
idx);
1371 static int nhmex_mbox_extra_reg_idx(
struct extra_reg *er)
1391 for (er = nhmex_uncore_mbox_extra_regs; er->
msr; er++) {
1407 reg1->
idx &= ~(0xff << (reg_idx * 8));
1408 reg1->
reg &= ~(0xffff << (reg_idx * 16));
1409 reg1->
idx |= nhmex_mbox_extra_reg_idx(er) << (reg_idx * 8);
1410 reg1->
reg |= msr << (reg_idx * 16);
1411 reg1->
config =
event->attr.config1;
1421 reg2->
config =
event->attr.config2;
1424 if (box->
pmu->pmu_idx == 0)
1435 unsigned long flags;
1458 nhmex_mbox_shared_reg_config(box, idx));
1462 nhmex_mbox_shared_reg_config(box, idx));
1465 wrmsrl(reg2->
reg, 0);
1466 if (reg2->
config != ~0ULL) {
1467 wrmsrl(reg2->
reg + 1,
1495 static struct attribute *nhmex_uncore_mbox_formats_attr[] = {
1496 &format_attr_count_mode.attr,
1497 &format_attr_storage_mode.attr,
1498 &format_attr_wrap_mode.attr,
1499 &format_attr_flag_mode.attr,
1500 &format_attr_inc_sel.attr,
1501 &format_attr_set_flag_sel.attr,
1502 &format_attr_filter_cfg_en.attr,
1503 &format_attr_filter_match.attr,
1504 &format_attr_filter_mask.attr,
1505 &format_attr_dsp.attr,
1506 &format_attr_thr.attr,
1507 &format_attr_fvc.attr,
1508 &format_attr_pgt.attr,
1509 &format_attr_map.attr,
1510 &format_attr_iss.attr,
1511 &format_attr_pld.attr,
1517 .attrs = nhmex_uncore_mbox_formats_attr,
1534 .enable_event = nhmex_mbox_msr_enable_event,
1535 .hw_config = nhmex_mbox_hw_config,
1536 .get_constraint = nhmex_mbox_get_constraint,
1537 .put_constraint = nhmex_mbox_put_constraint,
1544 .perf_ctr_bits = 48,
1551 .num_shared_regs = 8,
1552 .event_descs = nhmex_uncore_mbox_events,
1553 .ops = &nhmex_uncore_mbox_ops,
1554 .format_group = &nhmex_uncore_mbox_format_group,
1563 if (reg1->
idx % 2) {
1572 switch (reg1->
idx % 6) {
1597 unsigned long flags;
1602 if (!uncore_box_is_fake(box) && reg1->
alloc)
1605 idx = reg1->
idx % 6;
1612 er_idx += (reg1->
idx / 6) * 5;
1622 }
else if (idx == 2 || idx == 3) {
1627 u64 mask = 0xff << ((idx - 2) * 8);
1637 (er->
config == (hwc->config >> 32) &&
1641 er->
config = (hwc->config >> 32);
1660 if (idx != reg1->
idx % 6) {
1668 if (!uncore_box_is_fake(box)) {
1669 if (idx != reg1->
idx % 6)
1675 return &constraint_empty;
1684 if (uncore_box_is_fake(box) || !reg1->
alloc)
1687 idx = reg1->
idx % 6;
1691 er_idx += (reg1->
idx / 6) * 5;
1694 if (idx == 2 || idx == 3)
1715 reg1->
config =
event->attr.config1;
1720 hwc->config |=
event->attr.config & (~0ULL << 32);
1721 reg2->
config =
event->attr.config2;
1730 unsigned long flags;
1750 port = idx / 6 + box->
pmu->pmu_idx * 4;
1762 nhmex_rbox_shared_reg_config(box, 2 + (idx / 6) * 5));
1788 static struct attribute *nhmex_uncore_rbox_formats_attr[] = {
1789 &format_attr_event5.attr,
1790 &format_attr_xbr_mm_cfg.attr,
1791 &format_attr_xbr_match.attr,
1792 &format_attr_xbr_mask.attr,
1793 &format_attr_qlx_cfg.attr,
1794 &format_attr_iperf_cfg.attr,
1800 .attrs = nhmex_uncore_rbox_formats_attr,
1815 .enable_event = nhmex_rbox_msr_enable_event,
1816 .hw_config = nhmex_rbox_hw_config,
1817 .get_constraint = nhmex_rbox_get_constraint,
1818 .put_constraint = nhmex_rbox_put_constraint,
1825 .perf_ctr_bits = 48,
1832 .num_shared_regs = 20,
1833 .event_descs = nhmex_uncore_rbox_events,
1834 .ops = &nhmex_uncore_rbox_ops,
1835 .format_group = &nhmex_uncore_rbox_format_group
1855 hwc->last_tag = ++box->
tags[
idx];
1858 hwc->event_base = uncore_fixed_ctr(box);
1859 hwc->config_base = uncore_fixed_ctl(box);
1863 hwc->config_base = uncore_event_ctl(box, hwc->idx);
1864 hwc->event_base = uncore_perf_ctr(box, hwc->idx);
1873 shift = 64 - uncore_fixed_ctr_bits(box);
1875 shift = 64 - uncore_perf_ctr_bits(box);
1880 new_count = uncore_read_counter(box, event);
1881 if (
local64_xchg(&event->hw.prev_count, new_count) != prev_count)
1884 delta = (new_count << shift) - (prev_count << shift);
1898 unsigned long flags;
1911 uncore_perf_event_update(box, box->
events[bit]);
1922 ns_to_ktime(UNCORE_PMU_HRTIMER_INTERVAL), 0,
1934 box->
hrtimer.function = uncore_pmu_hrtimer;
1951 uncore_pmu_init_hrtimer(box);
2001 max_count = box->
pmu->type->num_counters;
2002 if (box->
pmu->type->fixed_ctl)
2033 if (type->
ops->get_constraint) {
2034 c = type->
ops->get_constraint(box, event);
2039 if (event->hw.config == ~0ULL)
2040 return &constraint_fixed;
2044 if ((event->hw.config & c->
cmask) == c->
code)
2054 if (box->
pmu->type->ops->put_constraint)
2055 box->
pmu->type->ops->put_constraint(box, event);
2058 static int uncore_assign_events(
struct intel_uncore_box *box,
int assign[],
int n)
2062 int i, wmin, wmax,
ret = 0;
2068 c = uncore_get_event_constraint(box, box->
event_list[i]);
2075 for (i = 0; i <
n; i++) {
2093 assign[
i] = hwc->idx;
2099 if (!assign || ret) {
2100 for (i = 0; i <
n; i++)
2101 uncore_put_event_constraint(box, box->
event_list[i]);
2103 return ret ? -
EINVAL : 0;
2106 static void uncore_pmu_event_start(
struct perf_event *event,
int flags)
2109 int idx =
event->hw.idx;
2117 event->hw.state = 0;
2122 local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
2123 uncore_enable_event(box, event);
2126 uncore_enable_box(box);
2127 uncore_pmu_start_hrtimer(box);
2131 static void uncore_pmu_event_stop(
struct perf_event *event,
int flags)
2137 uncore_disable_event(box, event);
2144 uncore_disable_box(box);
2145 uncore_pmu_cancel_hrtimer(box);
2154 uncore_perf_event_update(box, event);
2159 static int uncore_pmu_event_add(
struct perf_event *event,
int flags)
2169 ret = n = uncore_collect_events(box, event,
false);
2177 ret = uncore_assign_events(box, assign, n);
2182 for (i = 0; i < box->
n_events; i++) {
2186 if (hwc->idx == assign[i] &&
2187 hwc->last_tag == box->
tags[assign[i]])
2196 uncore_pmu_event_stop(event, PERF_EF_UPDATE);
2200 for (i = 0; i <
n; i++) {
2204 if (hwc->idx != assign[i] ||
2205 hwc->last_tag != box->
tags[assign[i]])
2206 uncore_assign_hw_event(box, event, assign[i]);
2207 else if (i < box->n_events)
2213 uncore_pmu_event_start(event, 0);
2220 static void uncore_pmu_event_del(
struct perf_event *event,
int flags)
2225 uncore_pmu_event_stop(event, PERF_EF_UPDATE);
2227 for (i = 0; i < box->
n_events; i++) {
2229 uncore_put_event_constraint(box, event);
2240 event->hw.last_tag = ~0ULL;
2243 static void uncore_pmu_event_read(
struct perf_event *event)
2246 uncore_perf_event_update(box, event);
2256 struct perf_event *leader =
event->group_leader;
2271 n = uncore_collect_events(fake_box, leader,
true);
2276 n = uncore_collect_events(fake_box, event,
false);
2282 ret = uncore_assign_events(fake_box,
NULL, n);
2295 if (event->
attr.type != event->pmu->type)
2298 pmu = uncore_event_to_pmu(event);
2307 if (event->
attr.exclude_user || event->
attr.exclude_kernel ||
2308 event->
attr.exclude_hv || event->
attr.exclude_idle)
2312 if (hwc->sample_period)
2321 box = uncore_pmu_to_box(pmu, event->cpu);
2322 if (!box || box->
cpu < 0)
2324 event->cpu = box->
cpu;
2327 event->hw.last_tag = ~0ULL;
2333 if (!pmu->
type->fixed_ctl)
2341 hwc->config = ~0ULL;
2343 hwc->config =
event->attr.config & pmu->
type->event_mask;
2344 if (pmu->
type->ops->hw_config) {
2345 ret = pmu->
type->ops->hw_config(box, event);
2351 if (event->group_leader != event)
2352 ret = uncore_validate_group(pmu, event);
2362 int n = cpulist_scnprintf(buf,
PAGE_SIZE - 2, &uncore_cpu_mask);
2371 static struct attribute *uncore_pmu_attrs[] = {
2372 &dev_attr_cpumask.attr,
2377 .attrs = uncore_pmu_attrs,
2384 pmu->
pmu = (
struct pmu) {
2385 .attr_groups = pmu->
type->attr_groups,
2388 .add = uncore_pmu_event_add,
2389 .del = uncore_pmu_event_del,
2390 .start = uncore_pmu_event_start,
2391 .stop = uncore_pmu_event_stop,
2392 .read = uncore_pmu_event_read,
2395 if (pmu->
type->num_boxes == 1) {
2417 kfree(type->events_group);
2418 type->events_group =
NULL;
2424 for (i = 0; types[
i]; i++)
2425 uncore_type_exit(types[i]);
2441 0, type->num_counters, 0);
2447 INIT_LIST_HEAD(&pmus[i].box_list);
2458 events_group = kzalloc(
sizeof(
struct attribute *) * (i + 1) +
2463 attrs = (
struct attribute **)(events_group + 1);
2464 events_group->
name =
"events";
2465 events_group->
attrs = attrs;
2467 for (j = 0; j <
i; j++)
2470 type->events_group = events_group;
2473 type->pmu_group = &uncore_pmu_attr_group;
2477 uncore_type_exit(type);
2485 for (i = 0; types[
i]; i++) {
2486 ret = uncore_type_init(types[i]);
2493 uncore_type_exit(types[i]);
/* Set after pci_register_driver() succeeds; checked/cleared on PCI exit. */
static bool pcidrv_registered;
2509 phys_id = pcibus_to_physid[pdev->
bus->number];
2522 pmu = &type->
pmus[
i];
2540 uncore_box_init(box);
2541 pci_set_drvdata(pdev, box);
2550 static void uncore_pci_remove(
struct pci_dev *pdev)
2554 int cpu, phys_id = pcibus_to_physid[pdev->
bus->number];
2581 return uncore_pci_add(type, pdev);
2584 static int __init uncore_pci_init(
void)
2590 ret = snbep_pci2phy_map_init();
2593 pci_uncores = snbep_pci_uncores;
2594 uncore_pci_driver = &snbep_uncore_pci_driver;
2600 ret = uncore_types_init(pci_uncores);
2604 uncore_pci_driver->
probe = uncore_pci_probe;
2605 uncore_pci_driver->
remove = uncore_pci_remove;
2607 ret = pci_register_driver(uncore_pci_driver);
2609 pcidrv_registered =
true;
2611 uncore_types_exit(pci_uncores);
2616 static void __init uncore_pci_exit(
void)
2618 if (pcidrv_registered) {
2619 pcidrv_registered =
false;
2621 uncore_types_exit(pci_uncores);
2625 static void __cpuinit uncore_cpu_dying(
int cpu)
2632 for (i = 0; msr_uncores[
i]; i++) {
2633 type = msr_uncores[
i];
2635 pmu = &type->
pmus[
j];
2644 static int __cpuinit uncore_cpu_starting(
int cpu)
2653 for (i = 0; msr_uncores[
i]; i++) {
2654 type = msr_uncores[
i];
2656 pmu = &type->
pmus[
j];
2659 if (box && box->
phys_id >= 0) {
2660 uncore_box_init(box);
2666 if (exist && exist->
phys_id == phys_id) {
2677 uncore_box_init(box);
2684 static int __cpuinit uncore_cpu_prepare(
int cpu,
int phys_id)
2691 for (i = 0; msr_uncores[
i]; i++) {
2692 type = msr_uncores[
i];
2694 pmu = &type->
pmus[
j];
2711 uncore_change_context(
struct intel_uncore_type **uncores,
int old_cpu,
int new_cpu)
2718 for (i = 0; uncores[
i]; i++) {
2721 pmu = &type->
pmus[
j];
2723 box = uncore_pmu_to_box(pmu, new_cpu);
2725 box = uncore_pmu_to_box(pmu, old_cpu);
2737 uncore_pmu_cancel_hrtimer(box);
2748 static void __cpuinit uncore_event_exit_cpu(
int cpu)
2753 if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
2770 cpumask_set_cpu(target, &uncore_cpu_mask);
2772 uncore_change_context(msr_uncores, cpu, target);
2773 uncore_change_context(pci_uncores, cpu, target);
2776 static void __cpuinit uncore_event_init_cpu(
int cpu)
2786 cpumask_set_cpu(cpu, &uncore_cpu_mask);
2788 uncore_change_context(msr_uncores, -1, cpu);
2789 uncore_change_context(pci_uncores, -1, cpu);
2795 unsigned int cpu = (
long)hcpu;
2800 uncore_cpu_prepare(cpu, -1);
2803 uncore_cpu_starting(cpu);
2807 uncore_cpu_dying(cpu);
2817 uncore_event_init_cpu(cpu);
2820 uncore_event_exit_cpu(cpu);
2830 .notifier_call = uncore_cpu_notifier,
2843 static int __init uncore_cpu_init(
void)
2853 msr_uncores = nhm_msr_uncores;
2856 if (snb_uncore_cbox.
num_boxes > max_cores)
2858 msr_uncores = snb_msr_uncores;
2861 if (snbep_uncore_cbox.
num_boxes > max_cores)
2862 snbep_uncore_cbox.
num_boxes = max_cores;
2863 msr_uncores = snbep_msr_uncores;
2866 uncore_nhmex =
true;
2869 nhmex_uncore_mbox.
event_descs = wsmex_uncore_mbox_events;
2870 if (nhmex_uncore_cbox.
num_boxes > max_cores)
2871 nhmex_uncore_cbox.
num_boxes = max_cores;
2872 msr_uncores = nhmex_msr_uncores;
2878 ret = uncore_types_init(msr_uncores);
2896 uncore_cpu_prepare(cpu, phys_id);
2897 uncore_event_init_cpu(cpu);
2901 register_cpu_notifier(&uncore_cpu_nb);
2908 static int __init uncore_pmus_register(
void)
2914 for (i = 0; msr_uncores[
i]; i++) {
2915 type = msr_uncores[
i];
2917 pmu = &type->
pmus[
j];
2918 uncore_pmu_register(pmu);
2922 for (i = 0; pci_uncores[
i]; i++) {
2923 type = pci_uncores[
i];
2925 pmu = &type->
pmus[
j];
2926 uncore_pmu_register(pmu);
2933 static int __init intel_uncore_init(
void)
2940 if (cpu_has_hypervisor)
2943 ret = uncore_pci_init();
2946 ret = uncore_cpu_init();
2952 uncore_pmus_register();