Linux Kernel 3.7.1
perf_event_intel_uncore.c
1 #include "perf_event_intel_uncore.h"
2 
3 static struct intel_uncore_type *empty_uncore[] = { NULL, };
4 static struct intel_uncore_type **msr_uncores = empty_uncore;
5 static struct intel_uncore_type **pci_uncores = empty_uncore;
6 /* pci bus to socket mapping */
7 static int pcibus_to_physid[256] = { [0 ... 255] = -1, };
8 
9 static DEFINE_RAW_SPINLOCK(uncore_box_lock);
10 
11 /* mask of cpus that collect uncore events */
12 static cpumask_t uncore_cpu_mask;
13 
14 /* constraint for the fixed counter */
15 static struct event_constraint constraint_fixed =
16  EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
17 static struct event_constraint constraint_empty =
18  EVENT_CONSTRAINT(0, 0, 0);
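/*
 * [Editor's note] EVENT_CONSTRAINT(code, counter_mask, code_mask) describes
 * which counters an event may use. constraint_fixed above matches any event
 * code (~0ULL) but only allows the UNCORE_PMC_IDX_FIXED slot, i.e. fixed-mode
 * events are pinned to the fixed counter. constraint_empty has an empty
 * counter mask; returning it from a get_constraint callback means the event
 * cannot be scheduled at the moment.
 */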
19 
20 DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
21 DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
22 DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
23 DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
24 DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
25 DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
26 DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
27 DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");
28 DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
29 DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
30 DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
31 DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
32 DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
33 DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
34 DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
35 DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
36 DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
37 DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
38 DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
39 DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
40 DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
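/*
 * [Editor's note] Each DEFINE_UNCORE_FORMAT_ATTR() above becomes a read-only
 * sysfs attribute under the PMU's format/ directory whose contents are the
 * quoted bit-range string, e.g. (path and PMU instance name are illustrative):
 *
 *	$ cat /sys/bus/event_source/devices/uncore_cbox_0/format/filter_opc
 *	config1:23-31
 *
 * The perf tool parses these strings to place user-supplied fields such as
 * "event", "umask" or "filter_opc" into the right bits of attr.config and
 * attr.config1.
 */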
41 
42 static u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
43 {
44  u64 count;
45 
46  rdmsrl(event->hw.event_base, count);
47 
48  return count;
49 }
50 
51 /*
52  * generic get constraint function for shared match/mask registers.
53  */
54 static struct event_constraint *
55 uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
56 {
57  struct intel_uncore_extra_reg *er;
58  struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
59  struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
60  unsigned long flags;
61  bool ok = false;
62 
63  /*
64  * reg->alloc can be set due to existing state, so for fake box we
65  * need to ignore this, otherwise we might fail to allocate proper
66  * fake state for this extra reg constraint.
67  */
68  if (reg1->idx == EXTRA_REG_NONE ||
69  (!uncore_box_is_fake(box) && reg1->alloc))
70  return NULL;
71 
72  er = &box->shared_regs[reg1->idx];
73  raw_spin_lock_irqsave(&er->lock, flags);
74  if (!atomic_read(&er->ref) ||
75  (er->config1 == reg1->config && er->config2 == reg2->config)) {
76  atomic_inc(&er->ref);
77  er->config1 = reg1->config;
78  er->config2 = reg2->config;
79  ok = true;
80  }
81  raw_spin_unlock_irqrestore(&er->lock, flags);
82 
83  if (ok) {
84  if (!uncore_box_is_fake(box))
85  reg1->alloc = 1;
86  return NULL;
87  }
88 
89  return &constraint_empty;
90 }
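/*
 * [Editor's note] Return convention used throughout this file: a NULL return
 * from a get_constraint callback means "no extra-register conflict, fall back
 * to the normal constraints", while returning &constraint_empty means the
 * shared match/mask register is already claimed with a different value and
 * the event cannot be scheduled right now.
 */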
91 
92 static void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
93 {
94  struct intel_uncore_extra_reg *er;
95  struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
96 
97  /*
98  * Only put constraint if extra reg was actually allocated. Also
99  * takes care of events which do not use an extra shared reg.
100  *
101  * Also, if this is a fake box we shouldn't touch any event state
102  * (reg->alloc) and we don't care about leaving inconsistent box
103  * state either since it will be thrown out.
104  */
105  if (uncore_box_is_fake(box) || !reg1->alloc)
106  return;
107 
108  er = &box->shared_regs[reg1->idx];
109  atomic_dec(&er->ref);
110  reg1->alloc = 0;
111 }
112 
113 /* Sandy Bridge-EP uncore support */
114 static struct intel_uncore_type snbep_uncore_cbox;
115 static struct intel_uncore_type snbep_uncore_pcu;
116 
117 static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
118 {
119  struct pci_dev *pdev = box->pci_dev;
120  int box_ctl = uncore_pci_box_ctl(box);
121  u32 config = 0;
122 
123  if (!pci_read_config_dword(pdev, box_ctl, &config)) {
124  config |= SNBEP_PMON_BOX_CTL_FRZ;
125  pci_write_config_dword(pdev, box_ctl, config);
126  }
127 }
128 
129 static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
130 {
131  struct pci_dev *pdev = box->pci_dev;
132  int box_ctl = uncore_pci_box_ctl(box);
133  u32 config = 0;
134 
135  if (!pci_read_config_dword(pdev, box_ctl, &config)) {
136  config &= ~SNBEP_PMON_BOX_CTL_FRZ;
137  pci_write_config_dword(pdev, box_ctl, config);
138  }
139 }
140 
141 static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
142 {
143  struct pci_dev *pdev = box->pci_dev;
144  struct hw_perf_event *hwc = &event->hw;
145 
146  pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
147 }
148 
149 static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
150 {
151  struct pci_dev *pdev = box->pci_dev;
152  struct hw_perf_event *hwc = &event->hw;
153 
154  pci_write_config_dword(pdev, hwc->config_base, hwc->config);
155 }
156 
157 static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
158 {
159  struct pci_dev *pdev = box->pci_dev;
160  struct hw_perf_event *hwc = &event->hw;
161  u64 count = 0;
162 
163  pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
164  pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
165 
166  return count;
167 }
168 
169 static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
170 {
171  struct pci_dev *pdev = box->pci_dev;
172 
173  pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, SNBEP_PMON_BOX_CTL_INT);
174 }
175 
176 static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
177 {
178  u64 config;
179  unsigned msr;
180 
181  msr = uncore_msr_box_ctl(box);
182  if (msr) {
183  rdmsrl(msr, config);
184  config |= SNBEP_PMON_BOX_CTL_FRZ;
185  wrmsrl(msr, config);
186  }
187 }
188 
189 static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
190 {
191  u64 config;
192  unsigned msr;
193 
194  msr = uncore_msr_box_ctl(box);
195  if (msr) {
196  rdmsrl(msr, config);
197  config &= ~SNBEP_PMON_BOX_CTL_FRZ;
198  wrmsrl(msr, config);
199  }
200 }
201 
202 static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
203 {
204  struct hw_perf_event *hwc = &event->hw;
205  struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
206 
207  if (reg1->idx != EXTRA_REG_NONE)
208  wrmsrl(reg1->reg, reg1->config);
209 
210  wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
211 }
212 
213 static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
214  struct perf_event *event)
215 {
216  struct hw_perf_event *hwc = &event->hw;
217 
218  wrmsrl(hwc->config_base, hwc->config);
219 }
220 
221 static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
222 {
223  unsigned msr = uncore_msr_box_ctl(box);
224 
225  if (msr)
226  wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
227 }
228 
229 static int snbep_uncore_hw_config(struct intel_uncore_box *box, struct perf_event *event)
230 {
231  struct hw_perf_event *hwc = &event->hw;
232  struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
233 
234  if (box->pmu->type == &snbep_uncore_cbox) {
235  reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
236  SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
237  reg1->config = event->attr.config1 &
238  SNBEP_CB0_MSR_PMON_BOX_FILTER_MASK;
239  } else {
240  if (box->pmu->type == &snbep_uncore_pcu) {
241  reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
242  reg1->config = event->attr.config1 & SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK;
243  } else {
244  return 0;
245  }
246  }
247  reg1->idx = 0;
248 
249  return 0;
250 }
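/*
 * [Editor's note] The config1 bits consumed here are the filter fields
 * declared above (filter_tid, filter_nid, filter_state, filter_opc for the
 * C-boxes, filter_band0..3 for the PCU). A purely illustrative invocation
 * from user space, assuming the PMU is exposed as uncore_cbox_0 and using a
 * made-up event encoding:
 *
 *	perf stat -a -e 'uncore_cbox_0/event=0x34,umask=0x1,filter_state=0x1/'
 *
 * The tool packs filter_state into config1 bits 18-22; this function stashes
 * that value in the event's extra_reg so snbep_uncore_msr_enable_event() can
 * program the box filter register before enabling the counter.
 */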
251 
252 static struct attribute *snbep_uncore_formats_attr[] = {
253  &format_attr_event.attr,
254  &format_attr_umask.attr,
255  &format_attr_edge.attr,
256  &format_attr_inv.attr,
257  &format_attr_thresh8.attr,
258  NULL,
259 };
260 
261 static struct attribute *snbep_uncore_ubox_formats_attr[] = {
262  &format_attr_event.attr,
263  &format_attr_umask.attr,
264  &format_attr_edge.attr,
265  &format_attr_inv.attr,
266  &format_attr_thresh5.attr,
267  NULL,
268 };
269 
270 static struct attribute *snbep_uncore_cbox_formats_attr[] = {
271  &format_attr_event.attr,
272  &format_attr_umask.attr,
273  &format_attr_edge.attr,
274  &format_attr_tid_en.attr,
275  &format_attr_inv.attr,
276  &format_attr_thresh8.attr,
277  &format_attr_filter_tid.attr,
278  &format_attr_filter_nid.attr,
279  &format_attr_filter_state.attr,
280  &format_attr_filter_opc.attr,
281  NULL,
282 };
283 
284 static struct attribute *snbep_uncore_pcu_formats_attr[] = {
285  &format_attr_event.attr,
286  &format_attr_occ_sel.attr,
287  &format_attr_edge.attr,
288  &format_attr_inv.attr,
289  &format_attr_thresh5.attr,
290  &format_attr_occ_invert.attr,
291  &format_attr_occ_edge.attr,
292  &format_attr_filter_band0.attr,
293  &format_attr_filter_band1.attr,
294  &format_attr_filter_band2.attr,
295  &format_attr_filter_band3.attr,
296  NULL,
297 };
298 
299 static struct attribute *snbep_uncore_qpi_formats_attr[] = {
300  &format_attr_event_ext.attr,
301  &format_attr_umask.attr,
302  &format_attr_edge.attr,
303  &format_attr_inv.attr,
304  &format_attr_thresh8.attr,
305  NULL,
306 };
307 
308 static struct uncore_event_desc snbep_uncore_imc_events[] = {
309  INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
310  INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x03"),
311  INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
312  { /* end: all zeroes */ },
313 };
314 
315 static struct uncore_event_desc snbep_uncore_qpi_events[] = {
316  INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"),
317  INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
318  INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x02,umask=0x08"),
319  INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x03,umask=0x04"),
320  { /* end: all zeroes */ },
321 };
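/*
 * [Editor's note] INTEL_UNCORE_EVENT_DESC() entries are exported as named
 * event aliases under the PMU's events/ sysfs directory, so the encodings
 * above can be used symbolically, e.g. (PMU instance name is illustrative):
 *
 *	perf stat -a -e uncore_imc_0/cas_count_read/ sleep 1
 */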
322 
323 static struct attribute_group snbep_uncore_format_group = {
324  .name = "format",
325  .attrs = snbep_uncore_formats_attr,
326 };
327 
328 static struct attribute_group snbep_uncore_ubox_format_group = {
329  .name = "format",
330  .attrs = snbep_uncore_ubox_formats_attr,
331 };
332 
333 static struct attribute_group snbep_uncore_cbox_format_group = {
334  .name = "format",
335  .attrs = snbep_uncore_cbox_formats_attr,
336 };
337 
338 static struct attribute_group snbep_uncore_pcu_format_group = {
339  .name = "format",
340  .attrs = snbep_uncore_pcu_formats_attr,
341 };
342 
343 static struct attribute_group snbep_uncore_qpi_format_group = {
344  .name = "format",
345  .attrs = snbep_uncore_qpi_formats_attr,
346 };
347 
348 static struct intel_uncore_ops snbep_uncore_msr_ops = {
349  .init_box = snbep_uncore_msr_init_box,
350  .disable_box = snbep_uncore_msr_disable_box,
351  .enable_box = snbep_uncore_msr_enable_box,
352  .disable_event = snbep_uncore_msr_disable_event,
353  .enable_event = snbep_uncore_msr_enable_event,
354  .read_counter = uncore_msr_read_counter,
355  .get_constraint = uncore_get_constraint,
356  .put_constraint = uncore_put_constraint,
357  .hw_config = snbep_uncore_hw_config,
358 };
359 
360 static struct intel_uncore_ops snbep_uncore_pci_ops = {
361  .init_box = snbep_uncore_pci_init_box,
362  .disable_box = snbep_uncore_pci_disable_box,
363  .enable_box = snbep_uncore_pci_enable_box,
364  .disable_event = snbep_uncore_pci_disable_event,
365  .enable_event = snbep_uncore_pci_enable_event,
366  .read_counter = snbep_uncore_pci_read_counter,
367 };
368 
369 static struct event_constraint snbep_uncore_cbox_constraints[] = {
370  UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
371  UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
372  UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
373  UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
374  UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
375  UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
376  UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
377  UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
378  UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
379  UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
380  UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
381  UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
382  EVENT_CONSTRAINT_OVERLAP(0x1f, 0xe, 0xff),
383  UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
384  UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
385  UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
386  UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
387  UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
388  UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
389  UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
390  UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
391  UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
392  UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
393  UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
394  UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
395  EVENT_CONSTRAINT_END
396 };
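/*
 * [Editor's note] UNCORE_EVENT_CONSTRAINT(event, mask) restricts an event
 * code to the counters set in mask: 0x1 = counter 0 only, 0x3 = counters 0-1,
 * 0xc = counters 2-3. EVENT_CONSTRAINT_OVERLAP is used for 0x1f because its
 * counter mask (0xe) overlaps the other masks without being a subset of any
 * of them, which requires special handling by the counter scheduler.
 */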
397 
398 static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
399  UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
400  UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
401  UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
402  UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
403  UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
404  UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
405  UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
406  UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
407  UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
408  UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
409  EVENT_CONSTRAINT_END
410 };
411 
412 static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
413  UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
414  UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
415  UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
416  UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
417  UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
418  UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
419  UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
420  UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
421  UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
422  UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
423  UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
424  UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
425  UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
426  UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
427  UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
428  UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
429  UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
430  UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
431  EVENT_CONSTRAINT_END
432 };
433 
434 static struct intel_uncore_type snbep_uncore_ubox = {
435  .name = "ubox",
436  .num_counters = 2,
437  .num_boxes = 1,
438  .perf_ctr_bits = 44,
439  .fixed_ctr_bits = 48,
440  .perf_ctr = SNBEP_U_MSR_PMON_CTR0,
441  .event_ctl = SNBEP_U_MSR_PMON_CTL0,
442  .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
443  .fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
444  .fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
445  .ops = &snbep_uncore_msr_ops,
446  .format_group = &snbep_uncore_ubox_format_group,
447 };
448 
449 static struct intel_uncore_type snbep_uncore_cbox = {
450  .name = "cbox",
451  .num_counters = 4,
452  .num_boxes = 8,
453  .perf_ctr_bits = 44,
454  .event_ctl = SNBEP_C0_MSR_PMON_CTL0,
455  .perf_ctr = SNBEP_C0_MSR_PMON_CTR0,
456  .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
457  .box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL,
458  .msr_offset = SNBEP_CBO_MSR_OFFSET,
459  .num_shared_regs = 1,
460  .constraints = snbep_uncore_cbox_constraints,
461  .ops = &snbep_uncore_msr_ops,
462  .format_group = &snbep_uncore_cbox_format_group,
463 };
464 
465 static struct intel_uncore_type snbep_uncore_pcu = {
466  .name = "pcu",
467  .num_counters = 4,
468  .num_boxes = 1,
469  .perf_ctr_bits = 48,
470  .perf_ctr = SNBEP_PCU_MSR_PMON_CTR0,
471  .event_ctl = SNBEP_PCU_MSR_PMON_CTL0,
472  .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
473  .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL,
474  .num_shared_regs = 1,
475  .ops = &snbep_uncore_msr_ops,
476  .format_group = &snbep_uncore_pcu_format_group,
477 };
478 
479 static struct intel_uncore_type *snbep_msr_uncores[] = {
480  &snbep_uncore_ubox,
481  &snbep_uncore_cbox,
482  &snbep_uncore_pcu,
483  NULL,
484 };
485 
486 #define SNBEP_UNCORE_PCI_COMMON_INIT() \
487  .perf_ctr = SNBEP_PCI_PMON_CTR0, \
488  .event_ctl = SNBEP_PCI_PMON_CTL0, \
489  .event_mask = SNBEP_PMON_RAW_EVENT_MASK, \
490  .box_ctl = SNBEP_PCI_PMON_BOX_CTL, \
491  .ops = &snbep_uncore_pci_ops, \
492  .format_group = &snbep_uncore_format_group
493 
494 static struct intel_uncore_type snbep_uncore_ha = {
495  .name = "ha",
496  .num_counters = 4,
497  .num_boxes = 1,
498  .perf_ctr_bits = 48,
499  SNBEP_UNCORE_PCI_COMMON_INIT(),
500 };
501 
502 static struct intel_uncore_type snbep_uncore_imc = {
503  .name = "imc",
504  .num_counters = 4,
505  .num_boxes = 4,
506  .perf_ctr_bits = 48,
507  .fixed_ctr_bits = 48,
508  .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
509  .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
510  .event_descs = snbep_uncore_imc_events,
511  SNBEP_UNCORE_PCI_COMMON_INIT(),
512 };
513 
514 static struct intel_uncore_type snbep_uncore_qpi = {
515  .name = "qpi",
516  .num_counters = 4,
517  .num_boxes = 2,
518  .perf_ctr_bits = 48,
519  .perf_ctr = SNBEP_PCI_PMON_CTR0,
520  .event_ctl = SNBEP_PCI_PMON_CTL0,
521  .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
522  .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
523  .ops = &snbep_uncore_pci_ops,
524  .event_descs = snbep_uncore_qpi_events,
525  .format_group = &snbep_uncore_qpi_format_group,
526 };
527 
528 
529 static struct intel_uncore_type snbep_uncore_r2pcie = {
530  .name = "r2pcie",
531  .num_counters = 4,
532  .num_boxes = 1,
533  .perf_ctr_bits = 44,
534  .constraints = snbep_uncore_r2pcie_constraints,
535  SNBEP_UNCORE_PCI_COMMON_INIT(),
536 };
537 
538 static struct intel_uncore_type snbep_uncore_r3qpi = {
539  .name = "r3qpi",
540  .num_counters = 3,
541  .num_boxes = 2,
542  .perf_ctr_bits = 44,
543  .constraints = snbep_uncore_r3qpi_constraints,
544  SNBEP_UNCORE_PCI_COMMON_INIT(),
545 };
546 
547 static struct intel_uncore_type *snbep_pci_uncores[] = {
548  &snbep_uncore_ha,
549  &snbep_uncore_imc,
550  &snbep_uncore_qpi,
551  &snbep_uncore_r2pcie,
552  &snbep_uncore_r3qpi,
553  NULL,
554 };
555 
556 static DEFINE_PCI_DEVICE_TABLE(snbep_uncore_pci_ids) = {
557  { /* Home Agent */
558  PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
559  .driver_data = (unsigned long)&snbep_uncore_ha,
560  },
561  { /* MC Channel 0 */
562  PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
563  .driver_data = (unsigned long)&snbep_uncore_imc,
564  },
565  { /* MC Channel 1 */
566  PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
567  .driver_data = (unsigned long)&snbep_uncore_imc,
568  },
569  { /* MC Channel 2 */
570  PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
571  .driver_data = (unsigned long)&snbep_uncore_imc,
572  },
573  { /* MC Channel 3 */
574  PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
575  .driver_data = (unsigned long)&snbep_uncore_imc,
576  },
577  { /* QPI Port 0 */
578  PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
579  .driver_data = (unsigned long)&snbep_uncore_qpi,
580  },
581  { /* QPI Port 1 */
582  PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
583  .driver_data = (unsigned long)&snbep_uncore_qpi,
584  },
585  { /* R2PCIe */
586  PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
587  .driver_data = (unsigned long)&snbep_uncore_r2pcie,
588  },
589  { /* R3QPI Link 0 */
590  PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
591  .driver_data = (unsigned long)&snbep_uncore_r3qpi,
592  },
593  { /* R3QPI Link 1 */
594  PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
595  .driver_data = (unsigned long)&snbep_uncore_r3qpi,
596  },
597  { /* end: all zeroes */ }
598 };
599 
600 static struct pci_driver snbep_uncore_pci_driver = {
601  .name = "snbep_uncore",
602  .id_table = snbep_uncore_pci_ids,
603 };
604 
605 /*
606  * build pci bus to socket mapping
607  */
608 static int snbep_pci2phy_map_init(void)
609 {
610  struct pci_dev *ubox_dev = NULL;
611  int i, bus, nodeid;
612  int err = 0;
613  u32 config = 0;
614 
615  while (1) {
616  /* find the UBOX device */
617  ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL,
618  PCI_DEVICE_ID_INTEL_JAKETOWN_UBOX,
619  ubox_dev);
620  if (!ubox_dev)
621  break;
622  bus = ubox_dev->bus->number;
623  /* get the Node ID of the local register */
624  err = pci_read_config_dword(ubox_dev, 0x40, &config);
625  if (err)
626  break;
627  nodeid = config;
628  /* get the Node ID mapping */
629  err = pci_read_config_dword(ubox_dev, 0x54, &config);
630  if (err)
631  break;
632  /*
633  * every three bits in the Node ID mapping register maps
634  * to a particular node.
635  */
636  for (i = 0; i < 8; i++) {
637  if (nodeid == ((config >> (3 * i)) & 0x7)) {
638  pcibus_to_physid[bus] = i;
639  break;
640  }
641  }
642  };
643 
644  if (ubox_dev)
645  pci_dev_put(ubox_dev);
646 
647  return err ? pcibios_err_to_errno(err) : 0;
648 }
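/*
 * [Editor's note] Worked example for the decode above (register values are
 * hypothetical): bits [3*i+2 : 3*i] of the mapping register at offset 0x54
 * hold the node id of socket i. If that register reads 0xfac688 (socket i
 * has node id i) and the local node id register at offset 0x40 reads 2, the
 * loop stops at i == 2 and records pcibus_to_physid[bus] = 2.
 */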
649 /* end of Sandy Bridge-EP uncore support */
650 
651 /* Sandy Bridge uncore support */
652 static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
653 {
654  struct hw_perf_event *hwc = &event->hw;
655 
656  if (hwc->idx < UNCORE_PMC_IDX_FIXED)
657  wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
658  else
659  wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
660 }
661 
662 static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
663 {
664  wrmsrl(event->hw.config_base, 0);
665 }
666 
667 static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
668 {
669  if (box->pmu->pmu_idx == 0) {
670  wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
671  SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
672  }
673 }
674 
675 static struct uncore_event_desc snb_uncore_events[] = {
676  INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
677  { /* end: all zeroes */ },
678 };
679 
680 static struct attribute *snb_uncore_formats_attr[] = {
681  &format_attr_event.attr,
682  &format_attr_umask.attr,
683  &format_attr_edge.attr,
684  &format_attr_inv.attr,
685  &format_attr_cmask5.attr,
686  NULL,
687 };
688 
689 static struct attribute_group snb_uncore_format_group = {
690  .name = "format",
691  .attrs = snb_uncore_formats_attr,
692 };
693 
694 static struct intel_uncore_ops snb_uncore_msr_ops = {
695  .init_box = snb_uncore_msr_init_box,
696  .disable_event = snb_uncore_msr_disable_event,
697  .enable_event = snb_uncore_msr_enable_event,
698  .read_counter = uncore_msr_read_counter,
699 };
700 
701 static struct event_constraint snb_uncore_cbox_constraints[] = {
702  UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
703  UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
705 };
706 
707 static struct intel_uncore_type snb_uncore_cbox = {
708  .name = "cbox",
709  .num_counters = 2,
710  .num_boxes = 4,
711  .perf_ctr_bits = 44,
712  .fixed_ctr_bits = 48,
713  .perf_ctr = SNB_UNC_CBO_0_PER_CTR0,
714  .event_ctl = SNB_UNC_CBO_0_PERFEVTSEL0,
715  .fixed_ctr = SNB_UNC_FIXED_CTR,
716  .fixed_ctl = SNB_UNC_FIXED_CTR_CTRL,
717  .single_fixed = 1,
718  .event_mask = SNB_UNC_RAW_EVENT_MASK,
719  .msr_offset = SNB_UNC_CBO_MSR_OFFSET,
720  .constraints = snb_uncore_cbox_constraints,
721  .ops = &snb_uncore_msr_ops,
722  .format_group = &snb_uncore_format_group,
723  .event_descs = snb_uncore_events,
724 };
725 
726 static struct intel_uncore_type *snb_msr_uncores[] = {
727  &snb_uncore_cbox,
728  NULL,
729 };
730 /* end of Sandy Bridge uncore support */
731 
732 /* Nehalem uncore support */
733 static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
734 {
735  wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
736 }
737 
738 static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
739 {
740  wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
741 }
742 
743 static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
744 {
745  struct hw_perf_event *hwc = &event->hw;
746 
747  if (hwc->idx < UNCORE_PMC_IDX_FIXED)
748  wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
749  else
750  wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
751 }
752 
753 static struct attribute *nhm_uncore_formats_attr[] = {
754  &format_attr_event.attr,
755  &format_attr_umask.attr,
756  &format_attr_edge.attr,
757  &format_attr_inv.attr,
758  &format_attr_cmask8.attr,
759  NULL,
760 };
761 
762 static struct attribute_group nhm_uncore_format_group = {
763  .name = "format",
764  .attrs = nhm_uncore_formats_attr,
765 };
766 
767 static struct uncore_event_desc nhm_uncore_events[] = {
768  INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
769  INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any, "event=0x2f,umask=0x0f"),
770  INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any, "event=0x2c,umask=0x0f"),
771  INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads, "event=0x20,umask=0x01"),
772  INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes, "event=0x20,umask=0x02"),
773  INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads, "event=0x20,umask=0x04"),
774  INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
775  INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads, "event=0x20,umask=0x10"),
776  INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes, "event=0x20,umask=0x20"),
777  { /* end: all zeroes */ },
778 };
779 
780 static struct intel_uncore_ops nhm_uncore_msr_ops = {
781  .disable_box = nhm_uncore_msr_disable_box,
782  .enable_box = nhm_uncore_msr_enable_box,
783  .disable_event = snb_uncore_msr_disable_event,
784  .enable_event = nhm_uncore_msr_enable_event,
785  .read_counter = uncore_msr_read_counter,
786 };
787 
788 static struct intel_uncore_type nhm_uncore = {
789  .name = "",
790  .num_counters = 8,
791  .num_boxes = 1,
792  .perf_ctr_bits = 48,
793  .fixed_ctr_bits = 48,
794  .event_ctl = NHM_UNC_PERFEVTSEL0,
795  .perf_ctr = NHM_UNC_UNCORE_PMC0,
796  .fixed_ctr = NHM_UNC_FIXED_CTR,
797  .fixed_ctl = NHM_UNC_FIXED_CTR_CTRL,
798  .event_mask = NHM_UNC_RAW_EVENT_MASK,
799  .event_descs = nhm_uncore_events,
800  .ops = &nhm_uncore_msr_ops,
801  .format_group = &nhm_uncore_format_group,
802 };
803 
804 static struct intel_uncore_type *nhm_msr_uncores[] = {
805  &nhm_uncore,
806  NULL,
807 };
808 /* end of Nehalem uncore support */
809 
810 /* Nehalem-EX uncore support */
811 #define __BITS_VALUE(x, i, n) ((typeof(x))(((x) >> ((i) * (n))) & \
812  ((1ULL << (n)) - 1)))
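/*
 * [Editor's note] __BITS_VALUE(x, i, n) extracts the i-th n-bit wide field of
 * x, counting from bit 0. For example __BITS_VALUE(0xABCD, 1, 8) == 0xAB, and
 * __BITS_VALUE(reg1->idx, 1, 8) used below reads the second 8-bit index slot
 * packed into reg1->idx.
 */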
813 
814 DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5");
815 DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7");
816 DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63");
817 DEFINE_UNCORE_FORMAT_ATTR(mask, mask, "config2:0-63");
818 
819 static void nhmex_uncore_msr_init_box(struct intel_uncore_box *box)
820 {
821  wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, NHMEX_U_PMON_GLOBAL_EN_ALL);
822 }
823 
824 static void nhmex_uncore_msr_disable_box(struct intel_uncore_box *box)
825 {
826  unsigned msr = uncore_msr_box_ctl(box);
827  u64 config;
828 
829  if (msr) {
830  rdmsrl(msr, config);
831  config &= ~((1ULL << uncore_num_counters(box)) - 1);
832  /* WBox has a fixed counter */
833  if (uncore_msr_fixed_ctl(box))
834  config &= ~NHMEX_W_PMON_GLOBAL_FIXED_EN;
835  wrmsrl(msr, config);
836  }
837 }
838 
839 static void nhmex_uncore_msr_enable_box(struct intel_uncore_box *box)
840 {
841  unsigned msr = uncore_msr_box_ctl(box);
842  u64 config;
843 
844  if (msr) {
845  rdmsrl(msr, config);
846  config |= (1ULL << uncore_num_counters(box)) - 1;
847  /* WBox has a fixed counter */
848  if (uncore_msr_fixed_ctl(box))
849  config |= NHMEX_W_PMON_GLOBAL_FIXED_EN;
850  wrmsrl(msr, config);
851  }
852 }
853 
854 static void nhmex_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
855 {
856  wrmsrl(event->hw.config_base, 0);
857 }
858 
859 static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
860 {
861  struct hw_perf_event *hwc = &event->hw;
862 
863  if (hwc->idx >= UNCORE_PMC_IDX_FIXED)
864  wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0);
865  else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0)
866  wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
867  else
868  wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
869 }
870 
871 #define NHMEX_UNCORE_OPS_COMMON_INIT() \
872  .init_box = nhmex_uncore_msr_init_box, \
873  .disable_box = nhmex_uncore_msr_disable_box, \
874  .enable_box = nhmex_uncore_msr_enable_box, \
875  .disable_event = nhmex_uncore_msr_disable_event, \
876  .read_counter = uncore_msr_read_counter
877 
878 static struct intel_uncore_ops nhmex_uncore_ops = {
879  NHMEX_UNCORE_OPS_COMMON_INIT(),
880  .enable_event = nhmex_uncore_msr_enable_event,
881 };
882 
883 static struct attribute *nhmex_uncore_ubox_formats_attr[] = {
884  &format_attr_event.attr,
885  &format_attr_edge.attr,
886  NULL,
887 };
888 
889 static struct attribute_group nhmex_uncore_ubox_format_group = {
890  .name = "format",
891  .attrs = nhmex_uncore_ubox_formats_attr,
892 };
893 
894 static struct intel_uncore_type nhmex_uncore_ubox = {
895  .name = "ubox",
896  .num_counters = 1,
897  .num_boxes = 1,
898  .perf_ctr_bits = 48,
899  .event_ctl = NHMEX_U_MSR_PMON_EV_SEL,
900  .perf_ctr = NHMEX_U_MSR_PMON_CTR,
901  .event_mask = NHMEX_U_PMON_RAW_EVENT_MASK,
902  .box_ctl = NHMEX_U_MSR_PMON_GLOBAL_CTL,
903  .ops = &nhmex_uncore_ops,
904  .format_group = &nhmex_uncore_ubox_format_group
905 };
906 
907 static struct attribute *nhmex_uncore_cbox_formats_attr[] = {
908  &format_attr_event.attr,
909  &format_attr_umask.attr,
910  &format_attr_edge.attr,
911  &format_attr_inv.attr,
912  &format_attr_thresh8.attr,
913  NULL,
914 };
915 
916 static struct attribute_group nhmex_uncore_cbox_format_group = {
917  .name = "format",
918  .attrs = nhmex_uncore_cbox_formats_attr,
919 };
920 
921 /* msr offset for each instance of cbox */
922 static unsigned nhmex_cbox_msr_offsets[] = {
923  0x0, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x240, 0x2c0,
924 };
925 
926 static struct intel_uncore_type nhmex_uncore_cbox = {
927  .name = "cbox",
928  .num_counters = 6,
929  .num_boxes = 10,
930  .perf_ctr_bits = 48,
931  .event_ctl = NHMEX_C0_MSR_PMON_EV_SEL0,
932  .perf_ctr = NHMEX_C0_MSR_PMON_CTR0,
933  .event_mask = NHMEX_PMON_RAW_EVENT_MASK,
934  .box_ctl = NHMEX_C0_MSR_PMON_GLOBAL_CTL,
935  .msr_offsets = nhmex_cbox_msr_offsets,
936  .pair_ctr_ctl = 1,
937  .ops = &nhmex_uncore_ops,
938  .format_group = &nhmex_uncore_cbox_format_group
939 };
940 
941 static struct uncore_event_desc nhmex_uncore_wbox_events[] = {
942  INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0"),
943  { /* end: all zeroes */ },
944 };
945 
946 static struct intel_uncore_type nhmex_uncore_wbox = {
947  .name = "wbox",
948  .num_counters = 4,
949  .num_boxes = 1,
950  .perf_ctr_bits = 48,
951  .event_ctl = NHMEX_W_MSR_PMON_CNT0,
952  .perf_ctr = NHMEX_W_MSR_PMON_EVT_SEL0,
953  .fixed_ctr = NHMEX_W_MSR_PMON_FIXED_CTR,
954  .fixed_ctl = NHMEX_W_MSR_PMON_FIXED_CTL,
955  .event_mask = NHMEX_PMON_RAW_EVENT_MASK,
956  .box_ctl = NHMEX_W_MSR_GLOBAL_CTL,
957  .pair_ctr_ctl = 1,
958  .event_descs = nhmex_uncore_wbox_events,
959  .ops = &nhmex_uncore_ops,
960  .format_group = &nhmex_uncore_cbox_format_group
961 };
962 
963 static int nhmex_bbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
964 {
965  struct hw_perf_event *hwc = &event->hw;
966  struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
967  struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
968  int ctr, ev_sel;
969 
970  ctr = (hwc->config & NHMEX_B_PMON_CTR_MASK) >>
971  NHMEX_B_PMON_CTR_SHIFT;
972  ev_sel = (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK) >>
973  NHMEX_B_PMON_CTL_EV_SEL_SHIFT;
974 
975  /* events that do not use the match/mask registers */
976  if ((ctr == 0 && ev_sel > 0x3) || (ctr == 1 && ev_sel > 0x6) ||
977  (ctr == 2 && ev_sel != 0x4) || ctr == 3)
978  return 0;
979 
980  if (box->pmu->pmu_idx == 0)
981  reg1->reg = NHMEX_B0_MSR_MATCH;
982  else
983  reg1->reg = NHMEX_B1_MSR_MATCH;
984  reg1->idx = 0;
985  reg1->config = event->attr.config1;
986  reg2->config = event->attr.config2;
987  return 0;
988 }
989 
990 static void nhmex_bbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
991 {
992  struct hw_perf_event *hwc = &event->hw;
993  struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
994  struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
995 
996  if (reg1->idx != EXTRA_REG_NONE) {
997  wrmsrl(reg1->reg, reg1->config);
998  wrmsrl(reg1->reg + 1, reg2->config);
999  }
1000  wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
1001  (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK));
1002 }
1003 
1004 /*
1005  * The Bbox has 4 counters, but each counter monitors different events.
1006  * Use bits 6-7 in the event config to select counter.
1007  */
1008 static struct event_constraint nhmex_uncore_bbox_constraints[] = {
1009  EVENT_CONSTRAINT(0 , 1, 0xc0),
1010  EVENT_CONSTRAINT(0x40, 2, 0xc0),
1011  EVENT_CONSTRAINT(0x80, 4, 0xc0),
1012  EVENT_CONSTRAINT(0xc0, 8, 0xc0),
1013  EVENT_CONSTRAINT_END
1014 };
1015 
1016 static struct attribute *nhmex_uncore_bbox_formats_attr[] = {
1017  &format_attr_event5.attr,
1018  &format_attr_counter.attr,
1019  &format_attr_match.attr,
1020  &format_attr_mask.attr,
1021  NULL,
1022 };
1023 
1024 static struct attribute_group nhmex_uncore_bbox_format_group = {
1025  .name = "format",
1026  .attrs = nhmex_uncore_bbox_formats_attr,
1027 };
1028 
1029 static struct intel_uncore_ops nhmex_uncore_bbox_ops = {
1030  NHMEX_UNCORE_OPS_COMMON_INIT(),
1031  .enable_event = nhmex_bbox_msr_enable_event,
1032  .hw_config = nhmex_bbox_hw_config,
1033  .get_constraint = uncore_get_constraint,
1034  .put_constraint = uncore_put_constraint,
1035 };
1036 
1037 static struct intel_uncore_type nhmex_uncore_bbox = {
1038  .name = "bbox",
1039  .num_counters = 4,
1040  .num_boxes = 2,
1041  .perf_ctr_bits = 48,
1042  .event_ctl = NHMEX_B0_MSR_PMON_CTL0,
1043  .perf_ctr = NHMEX_B0_MSR_PMON_CTR0,
1044  .event_mask = NHMEX_B_PMON_RAW_EVENT_MASK,
1045  .box_ctl = NHMEX_B0_MSR_PMON_GLOBAL_CTL,
1046  .msr_offset = NHMEX_B_MSR_OFFSET,
1047  .pair_ctr_ctl = 1,
1048  .num_shared_regs = 1,
1049  .constraints = nhmex_uncore_bbox_constraints,
1050  .ops = &nhmex_uncore_bbox_ops,
1051  .format_group = &nhmex_uncore_bbox_format_group
1052 };
1053 
1054 static int nhmex_sbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1055 {
1056  struct hw_perf_event *hwc = &event->hw;
1057  struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1058  struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1059 
1060  /* only TO_R_PROG_EV event uses the match/mask register */
1061  if ((hwc->config & NHMEX_PMON_CTL_EV_SEL_MASK) !=
1062  NHMEX_S_EVENT_TO_R_PROG_EV)
1063  return 0;
1064 
1065  if (box->pmu->pmu_idx == 0)
1066  reg1->reg = NHMEX_S0_MSR_MM_CFG;
1067  else
1068  reg1->reg = NHMEX_S1_MSR_MM_CFG;
1069  reg1->idx = 0;
1070  reg1->config = event->attr.config1;
1071  reg2->config = event->attr.config2;
1072  return 0;
1073 }
1074 
1075 static void nhmex_sbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1076 {
1077  struct hw_perf_event *hwc = &event->hw;
1078  struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1079  struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1080 
1081  if (reg1->idx != EXTRA_REG_NONE) {
1082  wrmsrl(reg1->reg, 0);
1083  wrmsrl(reg1->reg + 1, reg1->config);
1084  wrmsrl(reg1->reg + 2, reg2->config);
1085  wrmsrl(reg1->reg, NHMEX_S_PMON_MM_CFG_EN);
1086  }
1087  wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
1088 }
1089 
1090 static struct attribute *nhmex_uncore_sbox_formats_attr[] = {
1091  &format_attr_event.attr,
1092  &format_attr_umask.attr,
1093  &format_attr_edge.attr,
1094  &format_attr_inv.attr,
1095  &format_attr_thresh8.attr,
1096  &format_attr_match.attr,
1097  &format_attr_mask.attr,
1098  NULL,
1099 };
1100 
1101 static struct attribute_group nhmex_uncore_sbox_format_group = {
1102  .name = "format",
1103  .attrs = nhmex_uncore_sbox_formats_attr,
1104 };
1105 
1106 static struct intel_uncore_ops nhmex_uncore_sbox_ops = {
1107  NHMEX_UNCORE_OPS_COMMON_INIT(),
1108  .enable_event = nhmex_sbox_msr_enable_event,
1109  .hw_config = nhmex_sbox_hw_config,
1110  .get_constraint = uncore_get_constraint,
1111  .put_constraint = uncore_put_constraint,
1112 };
1113 
1114 static struct intel_uncore_type nhmex_uncore_sbox = {
1115  .name = "sbox",
1116  .num_counters = 4,
1117  .num_boxes = 2,
1118  .perf_ctr_bits = 48,
1119  .event_ctl = NHMEX_S0_MSR_PMON_CTL0,
1120  .perf_ctr = NHMEX_S0_MSR_PMON_CTR0,
1121  .event_mask = NHMEX_PMON_RAW_EVENT_MASK,
1122  .box_ctl = NHMEX_S0_MSR_PMON_GLOBAL_CTL,
1123  .msr_offset = NHMEX_S_MSR_OFFSET,
1124  .pair_ctr_ctl = 1,
1125  .num_shared_regs = 1,
1126  .ops = &nhmex_uncore_sbox_ops,
1127  .format_group = &nhmex_uncore_sbox_format_group
1128 };
1129 
1130 enum {
1131  EXTRA_REG_NHMEX_M_FILTER,
1132  EXTRA_REG_NHMEX_M_DSP,
1133  EXTRA_REG_NHMEX_M_ISS,
1134  EXTRA_REG_NHMEX_M_MAP,
1135  EXTRA_REG_NHMEX_M_MSC_THR,
1136  EXTRA_REG_NHMEX_M_PGT,
1137  EXTRA_REG_NHMEX_M_PLD,
1138  EXTRA_REG_NHMEX_M_ZDP_CTL_FVC,
1139 };
1140 
1141 static struct extra_reg nhmex_uncore_mbox_extra_regs[] = {
1143  MBOX_INC_SEL_EXTAR_REG(0x4, MSC_THR),
1144  MBOX_INC_SEL_EXTAR_REG(0x5, MSC_THR),
1145  MBOX_INC_SEL_EXTAR_REG(0x9, ISS),
1146  /* event 0xa uses two extra registers */
1147  MBOX_INC_SEL_EXTAR_REG(0xa, ISS),
1150  /* events 0xd ~ 0x10 use the same extra register */
1151  MBOX_INC_SEL_EXTAR_REG(0xd, ZDP_CTL_FVC),
1152  MBOX_INC_SEL_EXTAR_REG(0xe, ZDP_CTL_FVC),
1153  MBOX_INC_SEL_EXTAR_REG(0xf, ZDP_CTL_FVC),
1154  MBOX_INC_SEL_EXTAR_REG(0x10, ZDP_CTL_FVC),
1155  MBOX_INC_SEL_EXTAR_REG(0x16, PGT),
1157  MBOX_SET_FLAG_SEL_EXTRA_REG(0x1, ISS),
1158  MBOX_SET_FLAG_SEL_EXTRA_REG(0x5, PGT),
1161 };
1162 
1163 /* Nehalem-EX or Westmere-EX ? */
1164 static bool uncore_nhmex;
1165 
1166 static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config)
1167 {
1168  struct intel_uncore_extra_reg *er;
1169  unsigned long flags;
1170  bool ret = false;
1171  u64 mask;
1172 
1173  if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
1174  er = &box->shared_regs[idx];
1175  raw_spin_lock_irqsave(&er->lock, flags);
1176  if (!atomic_read(&er->ref) || er->config == config) {
1177  atomic_inc(&er->ref);
1178  er->config = config;
1179  ret = true;
1180  }
1181  raw_spin_unlock_irqrestore(&er->lock, flags);
1182 
1183  return ret;
1184  }
1185  /*
1186  * The ZDP_CTL_FVC MSR has 4 fields which are used to control
1187  * events 0xd ~ 0x10. Besides these 4 fields, there are additional
1188  * fields which are shared.
1189  */
1190  idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
1191  if (WARN_ON_ONCE(idx >= 4))
1192  return false;
1193 
1194  /* mask of the shared fields */
1195  if (uncore_nhmex)
1196  mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK;
1197  else
1198  mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK;
1199  er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
1200 
1201  raw_spin_lock_irqsave(&er->lock, flags);
1202  /* add mask of the non-shared field if it's in use */
1203  if (__BITS_VALUE(atomic_read(&er->ref), idx, 8)) {
1204  if (uncore_nhmex)
1205  mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
1206  else
1207  mask |= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
1208  }
1209 
1210  if (!atomic_read(&er->ref) || !((er->config ^ config) & mask)) {
1211  atomic_add(1 << (idx * 8), &er->ref);
1212  if (uncore_nhmex)
1215  else
1218  er->config &= ~mask;
1219  er->config |= (config & mask);
1220  ret = true;
1221  }
1222  raw_spin_unlock_irqrestore(&er->lock, flags);
1223 
1224  return ret;
1225 }
1226 
1227 static void nhmex_mbox_put_shared_reg(struct intel_uncore_box *box, int idx)
1228 {
1229  struct intel_uncore_extra_reg *er;
1230 
1231  if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
1232  er = &box->shared_regs[idx];
1233  atomic_dec(&er->ref);
1234  return;
1235  }
1236 
1237  idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
1238  er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
1239  atomic_sub(1 << (idx * 8), &er->ref);
1240 }
1241 
1242 u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify)
1243 {
1244  struct hw_perf_event *hwc = &event->hw;
1245  struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1246  int idx, orig_idx = __BITS_VALUE(reg1->idx, 0, 8);
1247  u64 config = reg1->config;
1248 
1249  /* get the non-shared control bits and shift them */
1250  idx = orig_idx - EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
1251  if (uncore_nhmex)
1252  config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
1253  else
1254  config &= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
1255  if (new_idx > orig_idx) {
1256  idx = new_idx - orig_idx;
1257  config <<= 3 * idx;
1258  } else {
1259  idx = orig_idx - new_idx;
1260  config >>= 3 * idx;
1261  }
1262 
1263  /* add the shared control bits back */
1264  if (uncore_nhmex)
1265  config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
1266  else
1267  config |= WSMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
1268  config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
1269  if (modify) {
1270  /* adjust the main event selector */
1271  if (new_idx > orig_idx)
1272  hwc->config += idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
1273  else
1274  hwc->config -= idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
1275  reg1->config = config;
1276  reg1->idx = ~0xff | new_idx;
1277  }
1278  return config;
1279 }
1280 
1281 static struct event_constraint *
1282 nhmex_mbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
1283 {
1284  struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1285  struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
1286  int i, idx[2], alloc = 0;
1287  u64 config1 = reg1->config;
1288 
1289  idx[0] = __BITS_VALUE(reg1->idx, 0, 8);
1290  idx[1] = __BITS_VALUE(reg1->idx, 1, 8);
1291 again:
1292  for (i = 0; i < 2; i++) {
1293  if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
1294  idx[i] = 0xff;
1295 
1296  if (idx[i] == 0xff)
1297  continue;
1298 
1299  if (!nhmex_mbox_get_shared_reg(box, idx[i],
1300  __BITS_VALUE(config1, i, 32)))
1301  goto fail;
1302  alloc |= (0x1 << i);
1303  }
1304 
1305  /* for the match/mask registers */
1306  if (reg2->idx != EXTRA_REG_NONE &&
1307  (uncore_box_is_fake(box) || !reg2->alloc) &&
1308  !nhmex_mbox_get_shared_reg(box, reg2->idx, reg2->config))
1309  goto fail;
1310 
1311  /*
1312  * If it's a fake box -- as per validate_{group,event}() we
1313  * shouldn't touch event state and we can avoid doing so
1314  * since both will only call get_event_constraints() once
1315  * on each event, this avoids the need for reg->alloc.
1316  */
1317  if (!uncore_box_is_fake(box)) {
1318  if (idx[0] != 0xff && idx[0] != __BITS_VALUE(reg1->idx, 0, 8))
1319  nhmex_mbox_alter_er(event, idx[0], true);
1320  reg1->alloc |= alloc;
1321  if (reg2->idx != EXTRA_REG_NONE)
1322  reg2->alloc = 1;
1323  }
1324  return NULL;
1325 fail:
1326  if (idx[0] != 0xff && !(alloc & 0x1) &&
1327  idx[0] >= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
1328  /*
1329  * events 0xd ~ 0x10 are functionally identical, but are
1330  * controlled by different fields in the ZDP_CTL_FVC
1331  * register. If we failed to take one field, try the
1332  * remaining 3 choices.
1333  */
1334  BUG_ON(__BITS_VALUE(reg1->idx, 1, 8) != 0xff);
1335  idx[0] -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
1336  idx[0] = (idx[0] + 1) % 4;
1337  idx[0] += EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
1338  if (idx[0] != __BITS_VALUE(reg1->idx, 0, 8)) {
1339  config1 = nhmex_mbox_alter_er(event, idx[0], false);
1340  goto again;
1341  }
1342  }
1343 
1344  if (alloc & 0x1)
1345  nhmex_mbox_put_shared_reg(box, idx[0]);
1346  if (alloc & 0x2)
1347  nhmex_mbox_put_shared_reg(box, idx[1]);
1348  return &constraint_empty;
1349 }
1350 
1351 static void nhmex_mbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
1352 {
1353  struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1354  struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
1355 
1356  if (uncore_box_is_fake(box))
1357  return;
1358 
1359  if (reg1->alloc & 0x1)
1360  nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 0, 8));
1361  if (reg1->alloc & 0x2)
1362  nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 1, 8));
1363  reg1->alloc = 0;
1364 
1365  if (reg2->alloc) {
1366  nhmex_mbox_put_shared_reg(box, reg2->idx);
1367  reg2->alloc = 0;
1368  }
1369 }
1370 
1371 static int nhmex_mbox_extra_reg_idx(struct extra_reg *er)
1372 {
1373  if (er->idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
1374  return er->idx;
1375  return er->idx + (er->event >> NHMEX_M_PMON_CTL_INC_SEL_SHIFT) - 0xd;
1376 }
1377 
1378 static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1379 {
1380  struct intel_uncore_type *type = box->pmu->type;
1381  struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1382  struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
1383  struct extra_reg *er;
1384  unsigned msr;
1385  int reg_idx = 0;
1386  /*
1387  * The mbox events may require 2 extra MSRs at the most. But only
1388  * the lower 32 bits in these MSRs are significant, so we can use
1389  * config1 to pass two MSRs' config.
1390  */
1391  for (er = nhmex_uncore_mbox_extra_regs; er->msr; er++) {
1392  if (er->event != (event->hw.config & er->config_mask))
1393  continue;
1394  if (event->attr.config1 & ~er->valid_mask)
1395  return -EINVAL;
1396 
1397  msr = er->msr + type->msr_offset * box->pmu->pmu_idx;
1398  if (WARN_ON_ONCE(msr >= 0xffff || er->idx >= 0xff))
1399  return -EINVAL;
1400 
1401  /* always use the 32~63 bits to pass the PLD config */
1402  if (er->idx == EXTRA_REG_NHMEX_M_PLD)
1403  reg_idx = 1;
1404  else if (WARN_ON_ONCE(reg_idx > 0))
1405  return -EINVAL;
1406 
1407  reg1->idx &= ~(0xff << (reg_idx * 8));
1408  reg1->reg &= ~(0xffff << (reg_idx * 16));
1409  reg1->idx |= nhmex_mbox_extra_reg_idx(er) << (reg_idx * 8);
1410  reg1->reg |= msr << (reg_idx * 16);
1411  reg1->config = event->attr.config1;
1412  reg_idx++;
1413  }
1414  /*
1415  * The mbox only provides ability to perform address matching
1416  * for the PLD events.
1417  */
1418  if (reg_idx == 2) {
1419  reg2->idx = EXTRA_REG_NHMEX_M_FILTER;
1420  if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN)
1421  reg2->config = event->attr.config2;
1422  else
1423  reg2->config = ~0ULL;
1424  if (box->pmu->pmu_idx == 0)
1425  reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG;
1426  else
1427  reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG;
1428  }
1429  return 0;
1430 }
1431 
1432 static u64 nhmex_mbox_shared_reg_config(struct intel_uncore_box *box, int idx)
1433 {
1434  struct intel_uncore_extra_reg *er;
1435  unsigned long flags;
1436  u64 config;
1437 
1438  if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
1439  return box->shared_regs[idx].config;
1440 
1441  er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
1442  raw_spin_lock_irqsave(&er->lock, flags);
1443  config = er->config;
1444  raw_spin_unlock_irqrestore(&er->lock, flags);
1445  return config;
1446 }
1447 
1448 static void nhmex_mbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1449 {
1450  struct hw_perf_event *hwc = &event->hw;
1451  struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1452  struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1453  int idx;
1454 
1455  idx = __BITS_VALUE(reg1->idx, 0, 8);
1456  if (idx != 0xff)
1457  wrmsrl(__BITS_VALUE(reg1->reg, 0, 16),
1458  nhmex_mbox_shared_reg_config(box, idx));
1459  idx = __BITS_VALUE(reg1->idx, 1, 8);
1460  if (idx != 0xff)
1461  wrmsrl(__BITS_VALUE(reg1->reg, 1, 16),
1462  nhmex_mbox_shared_reg_config(box, idx));
1463 
1464  if (reg2->idx != EXTRA_REG_NONE) {
1465  wrmsrl(reg2->reg, 0);
1466  if (reg2->config != ~0ULL) {
1467  wrmsrl(reg2->reg + 1,
1469  wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK &
1471  wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN);
1472  }
1473  }
1474 
1475  wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
1476 }
1477 
1478 DEFINE_UNCORE_FORMAT_ATTR(count_mode, count_mode, "config:2-3");
1479 DEFINE_UNCORE_FORMAT_ATTR(storage_mode, storage_mode, "config:4-5");
1480 DEFINE_UNCORE_FORMAT_ATTR(wrap_mode, wrap_mode, "config:6");
1481 DEFINE_UNCORE_FORMAT_ATTR(flag_mode, flag_mode, "config:7");
1482 DEFINE_UNCORE_FORMAT_ATTR(inc_sel, inc_sel, "config:9-13");
1483 DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel, set_flag_sel, "config:19-21");
1484 DEFINE_UNCORE_FORMAT_ATTR(filter_cfg_en, filter_cfg_en, "config2:63");
1485 DEFINE_UNCORE_FORMAT_ATTR(filter_match, filter_match, "config2:0-33");
1486 DEFINE_UNCORE_FORMAT_ATTR(filter_mask, filter_mask, "config2:34-61");
1487 DEFINE_UNCORE_FORMAT_ATTR(dsp, dsp, "config1:0-31");
1488 DEFINE_UNCORE_FORMAT_ATTR(thr, thr, "config1:0-31");
1489 DEFINE_UNCORE_FORMAT_ATTR(fvc, fvc, "config1:0-31");
1490 DEFINE_UNCORE_FORMAT_ATTR(pgt, pgt, "config1:0-31");
1491 DEFINE_UNCORE_FORMAT_ATTR(map, map, "config1:0-31");
1492 DEFINE_UNCORE_FORMAT_ATTR(iss, iss, "config1:0-31");
1493 DEFINE_UNCORE_FORMAT_ATTR(pld, pld, "config1:32-63");
1494 
1495 static struct attribute *nhmex_uncore_mbox_formats_attr[] = {
1496  &format_attr_count_mode.attr,
1497  &format_attr_storage_mode.attr,
1498  &format_attr_wrap_mode.attr,
1499  &format_attr_flag_mode.attr,
1500  &format_attr_inc_sel.attr,
1501  &format_attr_set_flag_sel.attr,
1502  &format_attr_filter_cfg_en.attr,
1503  &format_attr_filter_match.attr,
1504  &format_attr_filter_mask.attr,
1505  &format_attr_dsp.attr,
1506  &format_attr_thr.attr,
1507  &format_attr_fvc.attr,
1508  &format_attr_pgt.attr,
1509  &format_attr_map.attr,
1510  &format_attr_iss.attr,
1511  &format_attr_pld.attr,
1512  NULL,
1513 };
1514 
1515 static struct attribute_group nhmex_uncore_mbox_format_group = {
1516  .name = "format",
1517  .attrs = nhmex_uncore_mbox_formats_attr,
1518 };
1519 
1520 static struct uncore_event_desc nhmex_uncore_mbox_events[] = {
1521  INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x2800"),
1522  INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x2820"),
1523  { /* end: all zeroes */ },
1524 };
1525 
1526 static struct uncore_event_desc wsmex_uncore_mbox_events[] = {
1527  INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x5000"),
1528  INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x5040"),
1529  { /* end: all zeroes */ },
1530 };
1531 
1532 static struct intel_uncore_ops nhmex_uncore_mbox_ops = {
1533  NHMEX_UNCORE_OPS_COMMON_INIT(),
1534  .enable_event = nhmex_mbox_msr_enable_event,
1535  .hw_config = nhmex_mbox_hw_config,
1536  .get_constraint = nhmex_mbox_get_constraint,
1537  .put_constraint = nhmex_mbox_put_constraint,
1538 };
1539 
1540 static struct intel_uncore_type nhmex_uncore_mbox = {
1541  .name = "mbox",
1542  .num_counters = 6,
1543  .num_boxes = 2,
1544  .perf_ctr_bits = 48,
1545  .event_ctl = NHMEX_M0_MSR_PMU_CTL0,
1546  .perf_ctr = NHMEX_M0_MSR_PMU_CNT0,
1547  .event_mask = NHMEX_M_PMON_RAW_EVENT_MASK,
1548  .box_ctl = NHMEX_M0_MSR_GLOBAL_CTL,
1549  .msr_offset = NHMEX_M_MSR_OFFSET,
1550  .pair_ctr_ctl = 1,
1551  .num_shared_regs = 8,
1552  .event_descs = nhmex_uncore_mbox_events,
1553  .ops = &nhmex_uncore_mbox_ops,
1554  .format_group = &nhmex_uncore_mbox_format_group,
1555 };
1556 
1557 void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)
1558 {
1559  struct hw_perf_event *hwc = &event->hw;
1560  struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1561 
1562  /* adjust the main event selector and extra register index */
1563  if (reg1->idx % 2) {
1564  reg1->idx--;
1565  hwc->config -= 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
1566  } else {
1567  reg1->idx++;
1568  hwc->config += 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
1569  }
1570 
1571  /* adjust extra register config */
1572  switch (reg1->idx % 6) {
1573  case 2:
1574  /* shift the 8~15 bits to the 0~7 bits */
1575  reg1->config >>= 8;
1576  break;
1577  case 3:
1578  /* shift the 0~7 bits to the 8~15 bits */
1579  reg1->config <<= 8;
1580  break;
1581  };
1582 }
1583 
1584 /*
1585  * Each rbox has 4 event sets which monitor PQI ports 0~3 or 4~7.
1586  * An event set consists of 6 events; the 3rd and 4th events in
1587  * an event set use the same extra register, so an event set uses
1588  * 5 extra registers.
1589  */
1590 static struct event_constraint *
1591 nhmex_rbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
1592 {
1593  struct hw_perf_event *hwc = &event->hw;
1594  struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1595  struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1596  struct intel_uncore_extra_reg *er;
1597  unsigned long flags;
1598  int idx, er_idx;
1599  u64 config1;
1600  bool ok = false;
1601 
1602  if (!uncore_box_is_fake(box) && reg1->alloc)
1603  return NULL;
1604 
1605  idx = reg1->idx % 6;
1606  config1 = reg1->config;
1607 again:
1608  er_idx = idx;
1609  /* the 3rd and 4th events use the same extra register */
1610  if (er_idx > 2)
1611  er_idx--;
1612  er_idx += (reg1->idx / 6) * 5;
1613 
1614  er = &box->shared_regs[er_idx];
1615  raw_spin_lock_irqsave(&er->lock, flags);
1616  if (idx < 2) {
1617  if (!atomic_read(&er->ref) || er->config == reg1->config) {
1618  atomic_inc(&er->ref);
1619  er->config = reg1->config;
1620  ok = true;
1621  }
1622  } else if (idx == 2 || idx == 3) {
1623  /*
1624  * these two events use different fields in an extra register,
1625  * the 0~7 bits and the 8~15 bits respectively.
1626  */
1627  u64 mask = 0xff << ((idx - 2) * 8);
1628  if (!__BITS_VALUE(atomic_read(&er->ref), idx - 2, 8) ||
1629  !((er->config ^ config1) & mask)) {
1630  atomic_add(1 << ((idx - 2) * 8), &er->ref);
1631  er->config &= ~mask;
1632  er->config |= config1 & mask;
1633  ok = true;
1634  }
1635  } else {
1636  if (!atomic_read(&er->ref) ||
1637  (er->config == (hwc->config >> 32) &&
1638  er->config1 == reg1->config &&
1639  er->config2 == reg2->config)) {
1640  atomic_inc(&er->ref);
1641  er->config = (hwc->config >> 32);
1642  er->config1 = reg1->config;
1643  er->config2 = reg2->config;
1644  ok = true;
1645  }
1646  }
1647  raw_spin_unlock_irqrestore(&er->lock, flags);
1648 
1649  if (!ok) {
1650  /*
1651  * The Rbox events are always in pairs. The paired
1652  * events are functionally identical, but use different
1653  * extra registers. If we failed to take an extra
1654  * register, try the alternative.
1655  */
1656  if (idx % 2)
1657  idx--;
1658  else
1659  idx++;
1660  if (idx != reg1->idx % 6) {
1661  if (idx == 2)
1662  config1 >>= 8;
1663  else if (idx == 3)
1664  config1 <<= 8;
1665  goto again;
1666  }
1667  } else {
1668  if (!uncore_box_is_fake(box)) {
1669  if (idx != reg1->idx % 6)
1670  nhmex_rbox_alter_er(box, event);
1671  reg1->alloc = 1;
1672  }
1673  return NULL;
1674  }
1675  return &constraint_empty;
1676 }
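/*
 * [Editor's note] Index bookkeeping used above: idx = reg1->idx % 6 selects
 * the event within its set, events 2 and 3 share one extra register (hence
 * er_idx is decremented for idx > 2), and each set of 6 events owns a block
 * of 5 shared registers, hence the (reg1->idx / 6) * 5 offset. For example
 * reg1->idx == 9 gives idx == 3 and er_idx == 2 + 1 * 5 == 7.
 */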
1677 
1678 static void nhmex_rbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
1679 {
1680  struct intel_uncore_extra_reg *er;
1681  struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1682  int idx, er_idx;
1683 
1684  if (uncore_box_is_fake(box) || !reg1->alloc)
1685  return;
1686 
1687  idx = reg1->idx % 6;
1688  er_idx = idx;
1689  if (er_idx > 2)
1690  er_idx--;
1691  er_idx += (reg1->idx / 6) * 5;
1692 
1693  er = &box->shared_regs[er_idx];
1694  if (idx == 2 || idx == 3)
1695  atomic_sub(1 << ((idx - 2) * 8), &er->ref);
1696  else
1697  atomic_dec(&er->ref);
1698 
1699  reg1->alloc = 0;
1700 }
1701 
1702 static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1703 {
1704  struct hw_perf_event *hwc = &event->hw;
1705  struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1706  struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
1707  int idx;
1708 
1709  idx = (event->hw.config & NHMEX_R_PMON_CTL_EV_SEL_MASK) >>
1710  NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
1711  if (idx >= 0x18)
1712  return -EINVAL;
1713 
1714  reg1->idx = idx;
1715  reg1->config = event->attr.config1;
1716 
1717  switch (idx % 6) {
1718  case 4:
1719  case 5:
1720  hwc->config |= event->attr.config & (~0ULL << 32);
1721  reg2->config = event->attr.config2;
1722  break;
1723  };
1724  return 0;
1725 }
1726 
1727 static u64 nhmex_rbox_shared_reg_config(struct intel_uncore_box *box, int idx)
1728 {
1729  struct intel_uncore_extra_reg *er;
1730  unsigned long flags;
1731  u64 config;
1732 
1733  er = &box->shared_regs[idx];
1734 
1735  raw_spin_lock_irqsave(&er->lock, flags);
1736  config = er->config;
1737  raw_spin_unlock_irqrestore(&er->lock, flags);
1738 
1739  return config;
1740 }
1741 
1742 static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1743 {
1744  struct hw_perf_event *hwc = &event->hw;
1745  struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1746  struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1747  int idx, port;
1748 
1749  idx = reg1->idx;
1750  port = idx / 6 + box->pmu->pmu_idx * 4;
1751 
1752  switch (idx % 6) {
1753  case 0:
1754  wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG0(port), reg1->config);
1755  break;
1756  case 1:
1757  wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG1(port), reg1->config);
1758  break;
1759  case 2:
1760  case 3:
1761  wrmsrl(NHMEX_R_MSR_PORTN_QLX_CFG(port),
1762  nhmex_rbox_shared_reg_config(box, 2 + (idx / 6) * 5));
1763  break;
1764  case 4:
1765  wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port),
1766  hwc->config >> 32);
1767  wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(port), reg1->config);
1768  wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MASK(port), reg2->config);
1769  break;
1770  case 5:
1771  wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port),
1772  hwc->config >> 32);
1773  wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(port), reg1->config);
1774  wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MASK(port), reg2->config);
1775  break;
1776  };
1777 
1778  wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
1779  (hwc->config & NHMEX_R_PMON_CTL_EV_SEL_MASK));
1780 }
1781 
1782 DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg, xbr_mm_cfg, "config:32-63");
1783 DEFINE_UNCORE_FORMAT_ATTR(xbr_match, xbr_match, "config1:0-63");
1784 DEFINE_UNCORE_FORMAT_ATTR(xbr_mask, xbr_mask, "config2:0-63");
1785 DEFINE_UNCORE_FORMAT_ATTR(qlx_cfg, qlx_cfg, "config1:0-15");
1786 DEFINE_UNCORE_FORMAT_ATTR(iperf_cfg, iperf_cfg, "config1:0-31");
1787 
1788 static struct attribute *nhmex_uncore_rbox_formats_attr[] = {
1789  &format_attr_event5.attr,
1790  &format_attr_xbr_mm_cfg.attr,
1791  &format_attr_xbr_match.attr,
1792  &format_attr_xbr_mask.attr,
1793  &format_attr_qlx_cfg.attr,
1794  &format_attr_iperf_cfg.attr,
1795  NULL,
1796 };
1797 
1798 static struct attribute_group nhmex_uncore_rbox_format_group = {
1799  .name = "format",
1800  .attrs = nhmex_uncore_rbox_formats_attr,
1801 };
1802 
1803 static struct uncore_event_desc nhmex_uncore_rbox_events[] = {
1804  INTEL_UNCORE_EVENT_DESC(qpi0_flit_send, "event=0x0,iperf_cfg=0x80000000"),
1805  INTEL_UNCORE_EVENT_DESC(qpi1_filt_send, "event=0x6,iperf_cfg=0x80000000"),
1806  INTEL_UNCORE_EVENT_DESC(qpi0_idle_filt, "event=0x0,iperf_cfg=0x40000000"),
1807  INTEL_UNCORE_EVENT_DESC(qpi1_idle_filt, "event=0x6,iperf_cfg=0x40000000"),
1808  INTEL_UNCORE_EVENT_DESC(qpi0_date_response, "event=0x0,iperf_cfg=0xc4"),
1809  INTEL_UNCORE_EVENT_DESC(qpi1_date_response, "event=0x6,iperf_cfg=0xc4"),
1810  { /* end: all zeroes */ },
1811 };
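/*
 * Illustrative usage (anything beyond the rbox PMU/event names defined
 * above is an assumption): with the "uncore_<type>_<idx>" naming applied
 * by uncore_pmu_register() below, the first descriptor can be counted
 * system-wide with something along the lines of
 *
 *   perf stat -a -e uncore_rbox_0/event=0x0,iperf_cfg=0x80000000/ sleep 1
 *
 * i.e. exactly the config string exposed for qpi0_flit_send via the
 * "events" sysfs group built in uncore_type_init().
 */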
1812 
1813 static struct intel_uncore_ops nhmex_uncore_rbox_ops = {
1814  NHMEX_UNCORE_OPS_COMMON_INIT(),
1815  .enable_event = nhmex_rbox_msr_enable_event,
1816  .hw_config = nhmex_rbox_hw_config,
1817  .get_constraint = nhmex_rbox_get_constraint,
1818  .put_constraint = nhmex_rbox_put_constraint,
1819 };
1820 
1821 static struct intel_uncore_type nhmex_uncore_rbox = {
1822  .name = "rbox",
1823  .num_counters = 8,
1824  .num_boxes = 2,
1825  .perf_ctr_bits = 48,
1826  .event_ctl = NHMEX_R_MSR_PMON_CTL0,
1827  .perf_ctr = NHMEX_R_MSR_PMON_CNT0,
1828  .event_mask = NHMEX_R_PMON_RAW_EVENT_MASK,
1829  .box_ctl = NHMEX_R_MSR_GLOBAL_CTL,
1830  .msr_offset = NHMEX_R_MSR_OFFSET,
1831  .pair_ctr_ctl = 1,
1832  .num_shared_regs = 20,
1833  .event_descs = nhmex_uncore_rbox_events,
1834  .ops = &nhmex_uncore_rbox_ops,
1835  .format_group = &nhmex_uncore_rbox_format_group
1836 };
1837 
1838 static struct intel_uncore_type *nhmex_msr_uncores[] = {
1839  &nhmex_uncore_ubox,
1840  &nhmex_uncore_cbox,
1841  &nhmex_uncore_bbox,
1842  &nhmex_uncore_sbox,
1843  &nhmex_uncore_mbox,
1844  &nhmex_uncore_rbox,
1845  &nhmex_uncore_wbox,
1846  NULL,
1847 };
1848 /* end of Nehalem-EX uncore support */
1849 
1850 static void uncore_assign_hw_event(struct intel_uncore_box *box, struct perf_event *event, int idx)
1851 {
1852  struct hw_perf_event *hwc = &event->hw;
1853 
1854  hwc->idx = idx;
1855  hwc->last_tag = ++box->tags[idx];
1856 
1857  if (hwc->idx == UNCORE_PMC_IDX_FIXED) {
1858  hwc->event_base = uncore_fixed_ctr(box);
1859  hwc->config_base = uncore_fixed_ctl(box);
1860  return;
1861  }
1862 
1863  hwc->config_base = uncore_event_ctl(box, hwc->idx);
1864  hwc->event_base = uncore_perf_ctr(box, hwc->idx);
1865 }
1866 
1867 static void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
1868 {
1869  u64 prev_count, new_count, delta;
1870  int shift;
1871 
1872  if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
1873  shift = 64 - uncore_fixed_ctr_bits(box);
1874  else
1875  shift = 64 - uncore_perf_ctr_bits(box);
1876 
1877  /* the hrtimer might modify the previous event value */
1878 again:
1879  prev_count = local64_read(&event->hw.prev_count);
1880  new_count = uncore_read_counter(box, event);
1881  if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
1882  goto again;
1883 
1884  delta = (new_count << shift) - (prev_count << shift);
1885  delta >>= shift;
1886 
1887  local64_add(delta, &event->count);
1888 }
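/*
 * The paired shifts above reduce the difference modulo the counter width,
 * so a wrap-around is handled correctly. With 48-bit counters shift is 16;
 * for a raw transition from 0xffffffffffff to 0x1,
 * ((0x1 << 16) - (0xffffffffffff << 16)) is 0x20000 in 64-bit unsigned
 * arithmetic, and shifting back down yields delta = 2, the two counts that
 * actually occurred across the wrap.
 */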
1889 
1890 /*
1891  * The overflow interrupt is unavailable for SandyBridge-EP and broken
1892  * for SandyBridge, so we use an hrtimer to poll the counters
1893  * periodically and avoid losing counts to an unnoticed wrap-around.
1894  */
1895 static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
1896 {
1897  struct intel_uncore_box *box;
1898  unsigned long flags;
1899  int bit;
1900 
1901  box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
1902  if (!box->n_active || box->cpu != smp_processor_id())
1903  return HRTIMER_NORESTART;
1904  /*
1905  * disable local interrupts to prevent uncore_pmu_event_start/stop
1906  * from interrupting the update process
1907  */
1908  local_irq_save(flags);
1909 
1910  for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
1911  uncore_perf_event_update(box, box->events[bit]);
1912 
1913  local_irq_restore(flags);
1914 
1915  hrtimer_forward_now(hrtimer, ns_to_ktime(UNCORE_PMU_HRTIMER_INTERVAL));
1916  return HRTIMER_RESTART;
1917 }
1918 
1919 static void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
1920 {
1921  __hrtimer_start_range_ns(&box->hrtimer,
1922  ns_to_ktime(UNCORE_PMU_HRTIMER_INTERVAL), 0,
1923  HRTIMER_MODE_REL_PINNED, 0);
1924 }
1925 
1926 static void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
1927 {
1928  hrtimer_cancel(&box->hrtimer);
1929 }
1930 
1931 static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
1932 {
1933  hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1934  box->hrtimer.function = uncore_pmu_hrtimer;
1935 }
1936 
1937 static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int cpu)
1938 {
1939  struct intel_uncore_box *box;
1940  int i, size;
1941 
1942  size = sizeof(*box) + type->num_shared_regs * sizeof(struct intel_uncore_extra_reg);
1943 
1944  box = kmalloc_node(size, GFP_KERNEL | __GFP_ZERO, cpu_to_node(cpu));
1945  if (!box)
1946  return NULL;
1947 
1948  for (i = 0; i < type->num_shared_regs; i++)
1949  raw_spin_lock_init(&box->shared_regs[i].lock);
1950 
1951  uncore_pmu_init_hrtimer(box);
1952  atomic_set(&box->refcnt, 1);
1953  box->cpu = -1;
1954  box->phys_id = -1;
1955 
1956  return box;
1957 }
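/*
 * The box and its variable-length shared_regs[] array come from the single
 * node-local allocation sized above, so a plain kfree(box) in the teardown
 * paths releases both.
 */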
1958 
1959 static struct intel_uncore_box *
1960 uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
1961 {
1962  struct intel_uncore_box *box;
1963 
1964  box = *per_cpu_ptr(pmu->box, cpu);
1965  if (box)
1966  return box;
1967 
1968  raw_spin_lock(&uncore_box_lock);
1969  list_for_each_entry(box, &pmu->box_list, list) {
1970  if (box->phys_id == topology_physical_package_id(cpu)) {
1971  atomic_inc(&box->refcnt);
1972  *per_cpu_ptr(pmu->box, cpu) = box;
1973  break;
1974  }
1975  }
1976  raw_spin_unlock(&uncore_box_lock);
1977 
1978  return *per_cpu_ptr(pmu->box, cpu);
1979 }
1980 
1981 static struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
1982 {
1983  return container_of(event->pmu, struct intel_uncore_pmu, pmu);
1984 }
1985 
1986 static struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
1987 {
1988  /*
1989  * perf core schedules events on a per-cpu basis; uncore events are
1990  * collected by one of the cpus inside a physical package.
1991  */
1992  return uncore_pmu_to_box(uncore_event_to_pmu(event), smp_processor_id());
1993 }
1994 
1995 static int
1996 uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, bool dogrp)
1997 {
1998  struct perf_event *event;
1999  int n, max_count;
2000 
2001  max_count = box->pmu->type->num_counters;
2002  if (box->pmu->type->fixed_ctl)
2003  max_count++;
2004 
2005  if (box->n_events >= max_count)
2006  return -EINVAL;
2007 
2008  n = box->n_events;
2009  box->event_list[n] = leader;
2010  n++;
2011  if (!dogrp)
2012  return n;
2013 
2014  list_for_each_entry(event, &leader->sibling_list, group_entry) {
2015  if (event->state <= PERF_EVENT_STATE_OFF)
2016  continue;
2017 
2018  if (n >= max_count)
2019  return -EINVAL;
2020 
2021  box->event_list[n] = event;
2022  n++;
2023  }
2024  return n;
2025 }
2026 
2027 static struct event_constraint *
2028 uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
2029 {
2030  struct intel_uncore_type *type = box->pmu->type;
2031  struct event_constraint *c;
2032 
2033  if (type->ops->get_constraint) {
2034  c = type->ops->get_constraint(box, event);
2035  if (c)
2036  return c;
2037  }
2038 
2039  if (event->hw.config == ~0ULL)
2040  return &constraint_fixed;
2041 
2042  if (type->constraints) {
2043  for_each_event_constraint(c, type->constraints) {
2044  if ((event->hw.config & c->cmask) == c->code)
2045  return c;
2046  }
2047  }
2048 
2049  return &type->unconstrainted;
2050 }
2051 
2052 static void uncore_put_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
2053 {
2054  if (box->pmu->type->ops->put_constraint)
2055  box->pmu->type->ops->put_constraint(box, event);
2056 }
2057 
2058 static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n)
2059 {
2060  unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
2061  struct event_constraint *c, *constraints[UNCORE_PMC_IDX_MAX];
2062  int i, wmin, wmax, ret = 0;
2063  struct hw_perf_event *hwc;
2064 
2065  bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);
2066 
2067  for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
2068  c = uncore_get_event_constraint(box, box->event_list[i]);
2069  constraints[i] = c;
2070  wmin = min(wmin, c->weight);
2071  wmax = max(wmax, c->weight);
2072  }
2073 
2074  /* fastpath, try to reuse previous register */
2075  for (i = 0; i < n; i++) {
2076  hwc = &box->event_list[i]->hw;
2077  c = constraints[i];
2078 
2079  /* never assigned */
2080  if (hwc->idx == -1)
2081  break;
2082 
2083  /* constraint still honored */
2084  if (!test_bit(hwc->idx, c->idxmsk))
2085  break;
2086 
2087  /* not already used */
2088  if (test_bit(hwc->idx, used_mask))
2089  break;
2090 
2091  __set_bit(hwc->idx, used_mask);
2092  if (assign)
2093  assign[i] = hwc->idx;
2094  }
2095  /* slow path */
2096  if (i != n)
2097  ret = perf_assign_events(constraints, n, wmin, wmax, assign);
2098 
2099  if (!assign || ret) {
2100  for (i = 0; i < n; i++)
2101  uncore_put_event_constraint(box, box->event_list[i]);
2102  }
2103  return ret ? -EINVAL : 0;
2104 }
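/*
 * Assignment is two-phase: the fast path keeps each event on the counter it
 * already occupies as long as its constraint still permits that counter and
 * no other event has claimed it; only when that fails does
 * perf_assign_events() run the generic weight-ordered solver over the
 * collected constraints.
 */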
2105 
2106 static void uncore_pmu_event_start(struct perf_event *event, int flags)
2107 {
2108  struct intel_uncore_box *box = uncore_event_to_box(event);
2109  int idx = event->hw.idx;
2110 
2111  if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
2112  return;
2113 
2114  if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
2115  return;
2116 
2117  event->hw.state = 0;
2118  box->events[idx] = event;
2119  box->n_active++;
2120  __set_bit(idx, box->active_mask);
2121 
2122  local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
2123  uncore_enable_event(box, event);
2124 
2125  if (box->n_active == 1) {
2126  uncore_enable_box(box);
2127  uncore_pmu_start_hrtimer(box);
2128  }
2129 }
2130 
2131 static void uncore_pmu_event_stop(struct perf_event *event, int flags)
2132 {
2133  struct intel_uncore_box *box = uncore_event_to_box(event);
2134  struct hw_perf_event *hwc = &event->hw;
2135 
2136  if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
2137  uncore_disable_event(box, event);
2138  box->n_active--;
2139  box->events[hwc->idx] = NULL;
2140  WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
2141  hwc->state |= PERF_HES_STOPPED;
2142 
2143  if (box->n_active == 0) {
2144  uncore_disable_box(box);
2145  uncore_pmu_cancel_hrtimer(box);
2146  }
2147  }
2148 
2149  if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
2150  /*
2151  * Drain the remaining delta count out of an event
2152  * that we are disabling:
2153  */
2154  uncore_perf_event_update(box, event);
2155  hwc->state |= PERF_HES_UPTODATE;
2156  }
2157 }
2158 
2159 static int uncore_pmu_event_add(struct perf_event *event, int flags)
2160 {
2161  struct intel_uncore_box *box = uncore_event_to_box(event);
2162  struct hw_perf_event *hwc = &event->hw;
2163  int assign[UNCORE_PMC_IDX_MAX];
2164  int i, n, ret;
2165 
2166  if (!box)
2167  return -ENODEV;
2168 
2169  ret = n = uncore_collect_events(box, event, false);
2170  if (ret < 0)
2171  return ret;
2172 
2173  hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
2174  if (!(flags & PERF_EF_START))
2175  hwc->state |= PERF_HES_ARCH;
2176 
2177  ret = uncore_assign_events(box, assign, n);
2178  if (ret)
2179  return ret;
2180 
2181  /* save events moving to new counters */
2182  for (i = 0; i < box->n_events; i++) {
2183  event = box->event_list[i];
2184  hwc = &event->hw;
2185 
2186  if (hwc->idx == assign[i] &&
2187  hwc->last_tag == box->tags[assign[i]])
2188  continue;
2189  /*
2190  * Ensure we don't accidentally enable a stopped
2191  * counter simply because we rescheduled.
2192  */
2193  if (hwc->state & PERF_HES_STOPPED)
2194  hwc->state |= PERF_HES_ARCH;
2195 
2196  uncore_pmu_event_stop(event, PERF_EF_UPDATE);
2197  }
2198 
2199  /* reprogram moved events into new counters */
2200  for (i = 0; i < n; i++) {
2201  event = box->event_list[i];
2202  hwc = &event->hw;
2203 
2204  if (hwc->idx != assign[i] ||
2205  hwc->last_tag != box->tags[assign[i]])
2206  uncore_assign_hw_event(box, event, assign[i]);
2207  else if (i < box->n_events)
2208  continue;
2209 
2210  if (hwc->state & PERF_HES_ARCH)
2211  continue;
2212 
2213  uncore_pmu_event_start(event, 0);
2214  }
2215  box->n_events = n;
2216 
2217  return 0;
2218 }
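/*
 * The add path collects the new event, computes a complete assignment,
 * stops any already-scheduled event whose counter changed, and then
 * reprograms and restarts everything that is not being held stopped via
 * PERF_HES_ARCH.
 */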
2219 
2220 static void uncore_pmu_event_del(struct perf_event *event, int flags)
2221 {
2222  struct intel_uncore_box *box = uncore_event_to_box(event);
2223  int i;
2224 
2225  uncore_pmu_event_stop(event, PERF_EF_UPDATE);
2226 
2227  for (i = 0; i < box->n_events; i++) {
2228  if (event == box->event_list[i]) {
2229  uncore_put_event_constraint(box, event);
2230 
2231  while (++i < box->n_events)
2232  box->event_list[i - 1] = box->event_list[i];
2233 
2234  --box->n_events;
2235  break;
2236  }
2237  }
2238 
2239  event->hw.idx = -1;
2240  event->hw.last_tag = ~0ULL;
2241 }
2242 
2243 static void uncore_pmu_event_read(struct perf_event *event)
2244 {
2245  struct intel_uncore_box *box = uncore_event_to_box(event);
2246  uncore_perf_event_update(box, event);
2247 }
2248 
2249 /*
2250  * validation ensures the group can be loaded onto the
2251  * PMU if it was the only group available.
2252  */
2253 static int uncore_validate_group(struct intel_uncore_pmu *pmu,
2254  struct perf_event *event)
2255 {
2256  struct perf_event *leader = event->group_leader;
2257  struct intel_uncore_box *fake_box;
2258  int ret = -EINVAL, n;
2259 
2260  fake_box = uncore_alloc_box(pmu->type, smp_processor_id());
2261  if (!fake_box)
2262  return -ENOMEM;
2263 
2264  fake_box->pmu = pmu;
2265  /*
2266  * the event is not yet connected with its
2267  * siblings therefore we must first collect
2268  * existing siblings, then add the new event
2269  * before we can simulate the scheduling
2270  */
2271  n = uncore_collect_events(fake_box, leader, true);
2272  if (n < 0)
2273  goto out;
2274 
2275  fake_box->n_events = n;
2276  n = uncore_collect_events(fake_box, event, false);
2277  if (n < 0)
2278  goto out;
2279 
2280  fake_box->n_events = n;
2281 
2282  ret = uncore_assign_events(fake_box, NULL, n);
2283 out:
2284  kfree(fake_box);
2285  return ret;
2286 }
2287 
2288 static int uncore_pmu_event_init(struct perf_event *event)
2289 {
2290  struct intel_uncore_pmu *pmu;
2291  struct intel_uncore_box *box;
2292  struct hw_perf_event *hwc = &event->hw;
2293  int ret;
2294 
2295  if (event->attr.type != event->pmu->type)
2296  return -ENOENT;
2297 
2298  pmu = uncore_event_to_pmu(event);
2299  /* no device found for this pmu */
2300  if (pmu->func_id < 0)
2301  return -ENOENT;
2302 
2303  /*
2304  * The uncore PMU always measures at all privilege levels, so it
2305  * doesn't make sense to specify any exclude bits.
2306  */
2307  if (event->attr.exclude_user || event->attr.exclude_kernel ||
2308  event->attr.exclude_hv || event->attr.exclude_idle)
2309  return -EINVAL;
2310 
2311  /* Sampling not supported yet */
2312  if (hwc->sample_period)
2313  return -EINVAL;
2314 
2315  /*
2316  * Place all uncore events for a particular physical package
2317  * onto a single cpu
2318  */
2319  if (event->cpu < 0)
2320  return -EINVAL;
2321  box = uncore_pmu_to_box(pmu, event->cpu);
2322  if (!box || box->cpu < 0)
2323  return -EINVAL;
2324  event->cpu = box->cpu;
2325 
2326  event->hw.idx = -1;
2327  event->hw.last_tag = ~0ULL;
2328  event->hw.extra_reg.idx = EXTRA_REG_NONE;
2329  event->hw.branch_reg.idx = EXTRA_REG_NONE;
2330 
2331  if (event->attr.config == UNCORE_FIXED_EVENT) {
2332  /* no fixed counter */
2333  if (!pmu->type->fixed_ctl)
2334  return -EINVAL;
2335  /*
2336  * if there is only one fixed counter, only the first pmu
2337  * can access the fixed counter
2338  */
2339  if (pmu->type->single_fixed && pmu->pmu_idx > 0)
2340  return -EINVAL;
2341  hwc->config = ~0ULL;
2342  } else {
2343  hwc->config = event->attr.config & pmu->type->event_mask;
2344  if (pmu->type->ops->hw_config) {
2345  ret = pmu->type->ops->hw_config(box, event);
2346  if (ret)
2347  return ret;
2348  }
2349  }
2350 
2351  if (event->group_leader != event)
2352  ret = uncore_validate_group(pmu, event);
2353  else
2354  ret = 0;
2355 
2356  return ret;
2357 }
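/*
 * A minimal userspace sketch of how an event reaches the init path above
 * (illustrative only; the PMU name, sysfs path and config value are
 * assumptions, not taken from this file). The PMU type id is read from
 * sysfs, and the event is opened per-cpu (pid == -1, cpu >= 0) because
 * task contexts and sampling are rejected above.
 */
#if 0	/* illustrative userspace example, not kernel code */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static long sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
				int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	unsigned long long count;
	FILE *f;
	int type, fd;

	/* PMU type id as assigned by perf_pmu_register() */
	f = fopen("/sys/bus/event_source/devices/uncore_rbox_0/type", "r");
	if (!f)
		return 1;
	if (fscanf(f, "%d", &type) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;
	attr.config = 0x0;	/* event=0x0, see the format attributes */

	/* counting mode, system-wide on one cpu of the package */
	fd = sys_perf_event_open(&attr, -1, 0, -1, 0);
	if (fd < 0)
		return 1;

	sleep(1);
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("count: %llu\n", count);
	close(fd);
	return 0;
}
#endif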
2358 
2359 static ssize_t uncore_get_attr_cpumask(struct device *dev,
2360  struct device_attribute *attr, char *buf)
2361 {
2362  int n = cpulist_scnprintf(buf, PAGE_SIZE - 2, &uncore_cpu_mask);
2363 
2364  buf[n++] = '\n';
2365  buf[n] = '\0';
2366  return n;
2367 }
2368 
2369 static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL);
2370 
2371 static struct attribute *uncore_pmu_attrs[] = {
2372  &dev_attr_cpumask.attr,
2373  NULL,
2374 };
2375 
2376 static struct attribute_group uncore_pmu_attr_group = {
2377  .attrs = uncore_pmu_attrs,
2378 };
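/*
 * Together with the per-type "format" and "events" groups, the cpumask
 * attribute is how tools discover these PMUs: each registered PMU shows up
 * under /sys/bus/event_source/devices/, and its cpumask file names the one
 * cpu per physical package on which uncore events should be opened.
 */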
2379 
2380 static int __init uncore_pmu_register(struct intel_uncore_pmu *pmu)
2381 {
2382  int ret;
2383 
2384  pmu->pmu = (struct pmu) {
2385  .attr_groups = pmu->type->attr_groups,
2386  .task_ctx_nr = perf_invalid_context,
2387  .event_init = uncore_pmu_event_init,
2388  .add = uncore_pmu_event_add,
2389  .del = uncore_pmu_event_del,
2390  .start = uncore_pmu_event_start,
2391  .stop = uncore_pmu_event_stop,
2392  .read = uncore_pmu_event_read,
2393  };
2394 
2395  if (pmu->type->num_boxes == 1) {
2396  if (strlen(pmu->type->name) > 0)
2397  sprintf(pmu->name, "uncore_%s", pmu->type->name);
2398  else
2399  sprintf(pmu->name, "uncore");
2400  } else {
2401  sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,
2402  pmu->pmu_idx);
2403  }
2404 
2405  ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
2406  return ret;
2407 }
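/*
 * The resulting PMU names are "uncore_<type>" (or plain "uncore" when the
 * type name is empty) for single-box types, and "uncore_<type>_<idx>"
 * otherwise, e.g. uncore_rbox_0 and uncore_rbox_1 for the two Nehalem-EX
 * R-Boxes defined above.
 */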
2408 
2409 static void __init uncore_type_exit(struct intel_uncore_type *type)
2410 {
2411  int i;
2412 
2413  for (i = 0; i < type->num_boxes; i++)
2414  free_percpu(type->pmus[i].box);
2415  kfree(type->pmus);
2416  type->pmus = NULL;
2417  kfree(type->events_group);
2418  type->events_group = NULL;
2419 }
2420 
2421 static void __init uncore_types_exit(struct intel_uncore_type **types)
2422 {
2423  int i;
2424  for (i = 0; types[i]; i++)
2425  uncore_type_exit(types[i]);
2426 }
2427 
2428 static int __init uncore_type_init(struct intel_uncore_type *type)
2429 {
2430  struct intel_uncore_pmu *pmus;
2431  struct attribute_group *events_group;
2432  struct attribute **attrs;
2433  int i, j;
2434 
2435  pmus = kzalloc(sizeof(*pmus) * type->num_boxes, GFP_KERNEL);
2436  if (!pmus)
2437  return -ENOMEM;
2438 
2439  type->unconstrainted = (struct event_constraint)
2440  __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
2441  0, type->num_counters, 0);
2442 
2443  for (i = 0; i < type->num_boxes; i++) {
2444  pmus[i].func_id = -1;
2445  pmus[i].pmu_idx = i;
2446  pmus[i].type = type;
2447  INIT_LIST_HEAD(&pmus[i].box_list);
2448  pmus[i].box = alloc_percpu(struct intel_uncore_box *);
2449  if (!pmus[i].box)
2450  goto fail;
2451  }
2452 
2453  if (type->event_descs) {
2454  i = 0;
2455  while (type->event_descs[i].attr.attr.name)
2456  i++;
2457 
2458  events_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
2459  sizeof(*events_group), GFP_KERNEL);
2460  if (!events_group)
2461  goto fail;
2462 
2463  attrs = (struct attribute **)(events_group + 1);
2464  events_group->name = "events";
2465  events_group->attrs = attrs;
2466 
2467  for (j = 0; j < i; j++)
2468  attrs[j] = &type->event_descs[j].attr.attr;
2469 
2470  type->events_group = events_group;
2471  }
2472 
2473  type->pmu_group = &uncore_pmu_attr_group;
2474  type->pmus = pmus;
2475  return 0;
2476 fail:
2477  uncore_type_exit(type);
2478  return -ENOMEM;
2479 }
2480 
2481 static int __init uncore_types_init(struct intel_uncore_type **types)
2482 {
2483  int i, ret;
2484 
2485  for (i = 0; types[i]; i++) {
2486  ret = uncore_type_init(types[i]);
2487  if (ret)
2488  goto fail;
2489  }
2490  return 0;
2491 fail:
2492  while (--i >= 0)
2493  uncore_type_exit(types[i]);
2494  return ret;
2495 }
2496 
2497 static struct pci_driver *uncore_pci_driver;
2498 static bool pcidrv_registered;
2499 
2500 /*
2501  * add a pci uncore device
2502  */
2503 static int __devinit uncore_pci_add(struct intel_uncore_type *type, struct pci_dev *pdev)
2504 {
2505  struct intel_uncore_pmu *pmu;
2506  struct intel_uncore_box *box;
2507  int i, phys_id;
2508 
2509  phys_id = pcibus_to_physid[pdev->bus->number];
2510  if (phys_id < 0)
2511  return -ENODEV;
2512 
2513  box = uncore_alloc_box(type, 0);
2514  if (!box)
2515  return -ENOMEM;
2516 
2517  /*
2518  * for a performance monitoring unit with multiple boxes,
2519  * each box has a different function id.
2520  */
2521  for (i = 0; i < type->num_boxes; i++) {
2522  pmu = &type->pmus[i];
2523  if (pmu->func_id == pdev->devfn)
2524  break;
2525  if (pmu->func_id < 0) {
2526  pmu->func_id = pdev->devfn;
2527  break;
2528  }
2529  pmu = NULL;
2530  }
2531 
2532  if (!pmu) {
2533  kfree(box);
2534  return -EINVAL;
2535  }
2536 
2537  box->phys_id = phys_id;
2538  box->pci_dev = pdev;
2539  box->pmu = pmu;
2540  uncore_box_init(box);
2541  pci_set_drvdata(pdev, box);
2542 
2543  raw_spin_lock(&uncore_box_lock);
2544  list_add_tail(&box->list, &pmu->box_list);
2545  raw_spin_unlock(&uncore_box_lock);
2546 
2547  return 0;
2548 }
2549 
2550 static void uncore_pci_remove(struct pci_dev *pdev)
2551 {
2552  struct intel_uncore_box *box = pci_get_drvdata(pdev);
2553  struct intel_uncore_pmu *pmu = box->pmu;
2554  int cpu, phys_id = pcibus_to_physid[pdev->bus->number];
2555 
2556  if (WARN_ON_ONCE(phys_id != box->phys_id))
2557  return;
2558 
2559  raw_spin_lock(&uncore_box_lock);
2560  list_del(&box->list);
2561  raw_spin_unlock(&uncore_box_lock);
2562 
2563  for_each_possible_cpu(cpu) {
2564  if (*per_cpu_ptr(pmu->box, cpu) == box) {
2565  *per_cpu_ptr(pmu->box, cpu) = NULL;
2566  atomic_dec(&box->refcnt);
2567  }
2568  }
2569 
2570  WARN_ON_ONCE(atomic_read(&box->refcnt) != 1);
2571  kfree(box);
2572 }
2573 
2574 static int __devinit uncore_pci_probe(struct pci_dev *pdev,
2575  const struct pci_device_id *id)
2576 {
2577  struct intel_uncore_type *type;
2578 
2579  type = (struct intel_uncore_type *)id->driver_data;
2580 
2581  return uncore_pci_add(type, pdev);
2582 }
2583 
2584 static int __init uncore_pci_init(void)
2585 {
2586  int ret;
2587 
2588  switch (boot_cpu_data.x86_model) {
2589  case 45: /* Sandy Bridge-EP */
2590  ret = snbep_pci2phy_map_init();
2591  if (ret)
2592  return ret;
2593  pci_uncores = snbep_pci_uncores;
2594  uncore_pci_driver = &snbep_uncore_pci_driver;
2595  break;
2596  default:
2597  return 0;
2598  }
2599 
2600  ret = uncore_types_init(pci_uncores);
2601  if (ret)
2602  return ret;
2603 
2604  uncore_pci_driver->probe = uncore_pci_probe;
2605  uncore_pci_driver->remove = uncore_pci_remove;
2606 
2607  ret = pci_register_driver(uncore_pci_driver);
2608  if (ret == 0)
2609  pcidrv_registered = true;
2610  else
2611  uncore_types_exit(pci_uncores);
2612 
2613  return ret;
2614 }
2615 
2616 static void __init uncore_pci_exit(void)
2617 {
2618  if (pcidrv_registered) {
2619  pcidrv_registered = false;
2620  pci_unregister_driver(uncore_pci_driver);
2621  uncore_types_exit(pci_uncores);
2622  }
2623 }
2624 
2625 static void __cpuinit uncore_cpu_dying(int cpu)
2626 {
2627  struct intel_uncore_type *type;
2628  struct intel_uncore_pmu *pmu;
2629  struct intel_uncore_box *box;
2630  int i, j;
2631 
2632  for (i = 0; msr_uncores[i]; i++) {
2633  type = msr_uncores[i];
2634  for (j = 0; j < type->num_boxes; j++) {
2635  pmu = &type->pmus[j];
2636  box = *per_cpu_ptr(pmu->box, cpu);
2637  *per_cpu_ptr(pmu->box, cpu) = NULL;
2638  if (box && atomic_dec_and_test(&box->refcnt))
2639  kfree(box);
2640  }
2641  }
2642 }
2643 
2644 static int __cpuinit uncore_cpu_starting(int cpu)
2645 {
2646  struct intel_uncore_type *type;
2647  struct intel_uncore_pmu *pmu;
2648  struct intel_uncore_box *box, *exist;
2649  int i, j, k, phys_id;
2650 
2651  phys_id = topology_physical_package_id(cpu);
2652 
2653  for (i = 0; msr_uncores[i]; i++) {
2654  type = msr_uncores[i];
2655  for (j = 0; j < type->num_boxes; j++) {
2656  pmu = &type->pmus[j];
2657  box = *per_cpu_ptr(pmu->box, cpu);
2658  /* called by uncore_cpu_init? */
2659  if (box && box->phys_id >= 0) {
2660  uncore_box_init(box);
2661  continue;
2662  }
2663 
2664  for_each_online_cpu(k) {
2665  exist = *per_cpu_ptr(pmu->box, k);
2666  if (exist && exist->phys_id == phys_id) {
2667  atomic_inc(&exist->refcnt);
2668  *per_cpu_ptr(pmu->box, cpu) = exist;
2669  kfree(box);
2670  box = NULL;
2671  break;
2672  }
2673  }
2674 
2675  if (box) {
2676  box->phys_id = phys_id;
2677  uncore_box_init(box);
2678  }
2679  }
2680  }
2681  return 0;
2682 }
2683 
2684 static int __cpuinit uncore_cpu_prepare(int cpu, int phys_id)
2685 {
2686  struct intel_uncore_type *type;
2687  struct intel_uncore_pmu *pmu;
2688  struct intel_uncore_box *box;
2689  int i, j;
2690 
2691  for (i = 0; msr_uncores[i]; i++) {
2692  type = msr_uncores[i];
2693  for (j = 0; j < type->num_boxes; j++) {
2694  pmu = &type->pmus[j];
2695  if (pmu->func_id < 0)
2696  pmu->func_id = j;
2697 
2698  box = uncore_alloc_box(type, cpu);
2699  if (!box)
2700  return -ENOMEM;
2701 
2702  box->pmu = pmu;
2703  box->phys_id = phys_id;
2704  *per_cpu_ptr(pmu->box, cpu) = box;
2705  }
2706  }
2707  return 0;
2708 }
2709 
2710 static void __cpuinit
2711 uncore_change_context(struct intel_uncore_type **uncores, int old_cpu, int new_cpu)
2712 {
2713  struct intel_uncore_type *type;
2714  struct intel_uncore_pmu *pmu;
2715  struct intel_uncore_box *box;
2716  int i, j;
2717 
2718  for (i = 0; uncores[i]; i++) {
2719  type = uncores[i];
2720  for (j = 0; j < type->num_boxes; j++) {
2721  pmu = &type->pmus[j];
2722  if (old_cpu < 0)
2723  box = uncore_pmu_to_box(pmu, new_cpu);
2724  else
2725  box = uncore_pmu_to_box(pmu, old_cpu);
2726  if (!box)
2727  continue;
2728 
2729  if (old_cpu < 0) {
2730  WARN_ON_ONCE(box->cpu != -1);
2731  box->cpu = new_cpu;
2732  continue;
2733  }
2734 
2735  WARN_ON_ONCE(box->cpu != old_cpu);
2736  if (new_cpu >= 0) {
2737  uncore_pmu_cancel_hrtimer(box);
2738  perf_pmu_migrate_context(&pmu->pmu,
2739  old_cpu, new_cpu);
2740  box->cpu = new_cpu;
2741  } else {
2742  box->cpu = -1;
2743  }
2744  }
2745  }
2746 }
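/*
 * uncore_change_context() is the core of cpu hotplug handling: when the
 * collecting cpu of a package goes offline, its boxes (and any active
 * events, via perf_pmu_migrate_context) are handed to another online cpu
 * in the same package, or parked with box->cpu == -1 if none remains.
 */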
2747 
2748 static void __cpuinit uncore_event_exit_cpu(int cpu)
2749 {
2750  int i, phys_id, target;
2751 
2752  /* return unless the exiting cpu is the one collecting uncore events */
2753  if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
2754  return;
2755 
2756  /* find a new cpu to collect uncore events */
2757  phys_id = topology_physical_package_id(cpu);
2758  target = -1;
2759  for_each_online_cpu(i) {
2760  if (i == cpu)
2761  continue;
2762  if (phys_id == topology_physical_package_id(i)) {
2763  target = i;
2764  break;
2765  }
2766  }
2767 
2768  /* migrate uncore events to the new cpu */
2769  if (target >= 0)
2770  cpumask_set_cpu(target, &uncore_cpu_mask);
2771 
2772  uncore_change_context(msr_uncores, cpu, target);
2773  uncore_change_context(pci_uncores, cpu, target);
2774 }
2775 
2776 static void __cpuinit uncore_event_init_cpu(int cpu)
2777 {
2778  int i, phys_id;
2779 
2780  phys_id = topology_physical_package_id(cpu);
2781  for_each_cpu(i, &uncore_cpu_mask) {
2782  if (phys_id == topology_physical_package_id(i))
2783  return;
2784  }
2785 
2786  cpumask_set_cpu(cpu, &uncore_cpu_mask);
2787 
2788  uncore_change_context(msr_uncores, -1, cpu);
2789  uncore_change_context(pci_uncores, -1, cpu);
2790 }
2791 
2792 static int
2793  __cpuinit uncore_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
2794 {
2795  unsigned int cpu = (long)hcpu;
2796 
2797  /* allocate/free data structure for uncore box */
2798  switch (action & ~CPU_TASKS_FROZEN) {
2799  case CPU_UP_PREPARE:
2800  uncore_cpu_prepare(cpu, -1);
2801  break;
2802  case CPU_STARTING:
2803  uncore_cpu_starting(cpu);
2804  break;
2805  case CPU_UP_CANCELED:
2806  case CPU_DYING:
2807  uncore_cpu_dying(cpu);
2808  break;
2809  default:
2810  break;
2811  }
2812 
2813  /* select the cpu that collects uncore events */
2814  switch (action & ~CPU_TASKS_FROZEN) {
2815  case CPU_DOWN_FAILED:
2816  case CPU_STARTING:
2817  uncore_event_init_cpu(cpu);
2818  break;
2819  case CPU_DOWN_PREPARE:
2820  uncore_event_exit_cpu(cpu);
2821  break;
2822  default:
2823  break;
2824  }
2825 
2826  return NOTIFY_OK;
2827 }
2828 
2829 static struct notifier_block uncore_cpu_nb __cpuinitdata = {
2830  .notifier_call = uncore_cpu_notifier,
2831  /*
2832  * to migrate uncore events, our notifier should be executed
2833  * before perf core's notifier.
2834  */
2835  .priority = CPU_PRI_PERF + 1,
2836 };
2837 
2838 static void __init uncore_cpu_setup(void *dummy)
2839 {
2840  uncore_cpu_starting(smp_processor_id());
2841 }
2842 
2843 static int __init uncore_cpu_init(void)
2844 {
2845  int ret, cpu, max_cores;
2846 
2847  max_cores = boot_cpu_data.x86_max_cores;
2848  switch (boot_cpu_data.x86_model) {
2849  case 26: /* Nehalem */
2850  case 30:
2851  case 37: /* Westmere */
2852  case 44:
2853  msr_uncores = nhm_msr_uncores;
2854  break;
2855  case 42: /* Sandy Bridge */
2856  if (snb_uncore_cbox.num_boxes > max_cores)
2857  snb_uncore_cbox.num_boxes = max_cores;
2858  msr_uncores = snb_msr_uncores;
2859  break;
2860  case 45: /* Sandy Bridge-EP */
2861  if (snbep_uncore_cbox.num_boxes > max_cores)
2862  snbep_uncore_cbox.num_boxes = max_cores;
2863  msr_uncores = snbep_msr_uncores;
2864  break;
2865  case 46: /* Nehalem-EX */
2866  uncore_nhmex = true;
2867  case 47: /* Westmere-EX aka. Xeon E7 */
2868  if (!uncore_nhmex)
2869  nhmex_uncore_mbox.event_descs = wsmex_uncore_mbox_events;
2870  if (nhmex_uncore_cbox.num_boxes > max_cores)
2871  nhmex_uncore_cbox.num_boxes = max_cores;
2872  msr_uncores = nhmex_msr_uncores;
2873  break;
2874  default:
2875  return 0;
2876  }
2877 
2878  ret = uncore_types_init(msr_uncores);
2879  if (ret)
2880  return ret;
2881 
2882  get_online_cpus();
2883 
2884  for_each_online_cpu(cpu) {
2885  int i, phys_id = topology_physical_package_id(cpu);
2886 
2887  for_each_cpu(i, &uncore_cpu_mask) {
2888  if (phys_id == topology_physical_package_id(i)) {
2889  phys_id = -1;
2890  break;
2891  }
2892  }
2893  if (phys_id < 0)
2894  continue;
2895 
2896  uncore_cpu_prepare(cpu, phys_id);
2897  uncore_event_init_cpu(cpu);
2898  }
2899  on_each_cpu(uncore_cpu_setup, NULL, 1);
2900 
2901  register_cpu_notifier(&uncore_cpu_nb);
2902 
2903  put_online_cpus();
2904 
2905  return 0;
2906 }
2907 
2908 static int __init uncore_pmus_register(void)
2909 {
2910  struct intel_uncore_pmu *pmu;
2911  struct intel_uncore_type *type;
2912  int i, j;
2913 
2914  for (i = 0; msr_uncores[i]; i++) {
2915  type = msr_uncores[i];
2916  for (j = 0; j < type->num_boxes; j++) {
2917  pmu = &type->pmus[j];
2918  uncore_pmu_register(pmu);
2919  }
2920  }
2921 
2922  for (i = 0; pci_uncores[i]; i++) {
2923  type = pci_uncores[i];
2924  for (j = 0; j < type->num_boxes; j++) {
2925  pmu = &type->pmus[j];
2926  uncore_pmu_register(pmu);
2927  }
2928  }
2929 
2930  return 0;
2931 }
2932 
2933 static int __init intel_uncore_init(void)
2934 {
2935  int ret;
2936 
2937  if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
2938  return -ENODEV;
2939 
2940  if (cpu_has_hypervisor)
2941  return -ENODEV;
2942 
2943  ret = uncore_pci_init();
2944  if (ret)
2945  goto fail;
2946  ret = uncore_cpu_init();
2947  if (ret) {
2948  uncore_pci_exit();
2949  goto fail;
2950  }
2951 
2952  uncore_pmus_register();
2953  return 0;
2954 fail:
2955  return ret;
2956 }
2957 device_initcall(intel_uncore_init);