Linux Kernel 3.7.1
pci_sun4v.c
/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006, 2007, 2008 David S. Miller ([email protected])
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/export.h>
#include <linux/log2.h>
#include <linux/of_device.h>

#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>

#include "pci_impl.h"
#include "iommu_common.h"

#include "pci_sun4v.h"

#define DRIVER_NAME	"pci_sun4v"
#define PFX		DRIVER_NAME ": "

static unsigned long vpci_major = 1;
static unsigned long vpci_minor = 1;

#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))

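/* Hypervisor calls are expensive, so IOTSB entries are not programmed
 * one at a time.  Instead, physical page addresses are accumulated in a
 * per-cpu, page-sized list and handed to the pci_iommu_map hypervisor
 * service in one call per batch via the helpers below.
 */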
struct iommu_batch {
	struct device	*dev;		/* Device mapping is for. */
	unsigned long	prot;		/* IOMMU page protections */
	unsigned long	entry;		/* Index into IOTSB. */
	u64		*pglist;	/* List of physical pages */
	unsigned long	npages;		/* Number of pages in list. */
};

static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);
static int iommu_batch_initialized;

/* Interrupts must be disabled. */
static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	p->dev = dev;
	p->prot = prot;
	p->entry = entry;
	p->npages = 0;
}

/* Interrupts must be disabled. */
static long iommu_batch_flush(struct iommu_batch *p)
{
	struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
	unsigned long devhandle = pbm->devhandle;
	unsigned long prot = p->prot;
	unsigned long entry = p->entry;
	u64 *pglist = p->pglist;
	unsigned long npages = p->npages;

	while (npages != 0) {
		long num;

		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					  npages, prot, __pa(pglist));
		if (unlikely(num < 0)) {
			if (printk_ratelimit())
				printk("iommu_batch_flush: IOMMU map of "
				       "[%08lx:%08llx:%lx:%lx:%lx] failed with "
				       "status %ld\n",
				       devhandle, HV_PCI_TSBID(0, entry),
				       npages, prot, __pa(pglist), num);
			return -1;
		}

		entry += num;
		npages -= num;
		pglist += num;
	}

	p->entry = entry;
	p->npages = 0;

	return 0;
}

static inline void iommu_batch_new_entry(unsigned long entry)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	if (p->entry + p->npages == entry)
		return;
	if (p->entry != ~0UL)
		iommu_batch_flush(p);
	p->entry = entry;
}

/* Interrupts must be disabled. */
static inline long iommu_batch_add(u64 phys_page)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	p->pglist[p->npages++] = phys_page;
	if (p->npages == PGLIST_NENTS)
		return iommu_batch_flush(p);

	return 0;
}

/* Interrupts must be disabled. */
static inline long iommu_batch_end(void)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	return iommu_batch_flush(p);
}

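/* Coherent allocations: grab pages from the page allocator, carve out a
 * matching range of IOTSB entries under iommu->lock, then map each
 * IO page of the buffer read+write through the batching helpers above.
 */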
static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp,
				   struct dma_attrs *attrs)
{
	unsigned long flags, order, first_page, npages, n;
	struct iommu *iommu;
	struct page *page;
	void *ret;
	long entry;
	int nid;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	npages = size >> IO_PAGE_SHIFT;

	nid = dev->archdata.numa_node;
	page = alloc_pages_node(nid, gfp, order);
	if (unlikely(!page))
		return NULL;

	first_page = (unsigned long) page_address(page);
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = iommu_range_alloc(dev, iommu, npages, NULL);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry == DMA_ERROR_CODE))
		goto range_alloc_fail;

	*dma_addrp = (iommu->page_table_map_base +
		      (entry << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	first_page = __pa(first_page);

	local_irq_save(flags);

	iommu_batch_start(dev,
			  (HV_PCI_MAP_ATTR_READ |
			   HV_PCI_MAP_ATTR_WRITE),
			  entry);

	for (n = 0; n < npages; n++) {
		long err = iommu_batch_add(first_page + (n * PAGE_SIZE));
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}

	if (unlikely(iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

iommu_map_fail:
	/* Interrupts are disabled. */
	spin_lock(&iommu->lock);
	iommu_range_free(iommu, *dma_addrp, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

range_alloc_fail:
	free_pages(first_page, order);
	return NULL;
}

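/* Inverse of dma_4v_alloc_coherent: release the IOTSB range, demap the
 * translations via the hypervisor (which may complete a demap request
 * only partially, hence the loop), and free the underlying pages.
 */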
static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
				 dma_addr_t dvma, struct dma_attrs *attrs)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long flags, order, npages, entry;
	u32 devhandle;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;
	entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_range_free(iommu, dvma, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

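/* Map a single page for streaming DMA.  The returned handle is the
 * DVMA base of the allocated IOTSB range plus the sub-page offset of
 * the original buffer; write permission is withheld for DMA_TO_DEVICE.
 */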
static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t sz,
				  enum dma_data_direction direction,
				  struct dma_attrs *attrs)
{
	struct iommu *iommu;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr;
	u32 bus_addr, ret;
	unsigned long prot;
	long entry;

	iommu = dev->archdata.iommu;

	if (unlikely(direction == DMA_NONE))
		goto bad;

	oaddr = (unsigned long)(page_address(page) + offset);
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = iommu_range_alloc(dev, iommu, npages, NULL);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry == DMA_ERROR_CODE))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    (entry << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	local_irq_save(flags);

	iommu_batch_start(dev, prot, entry);

	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
		long err = iommu_batch_add(base_paddr);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}
	if (unlikely(iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_ERROR_CODE;

iommu_map_fail:
	/* Interrupts are disabled. */
	spin_lock(&iommu->lock);
	iommu_range_free(iommu, bus_addr, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return DMA_ERROR_CODE;
}

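/* Tear down a streaming mapping: return the IOTSB range to the arena
 * and demap the translations.  As in the free_coherent path, the
 * hypervisor may demap fewer pages than requested per call.
 */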
static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
			      size_t sz, enum dma_data_direction direction,
			      struct dma_attrs *attrs)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long flags, npages;
	long entry;
	u32 devhandle;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_range_free(iommu, bus_addr, npages);

	entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

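/* Map a scatterlist.  Besides programming the IOTSB, this coalesces
 * entries whose DMA addresses come out contiguous, subject to the
 * device's max segment size and segment boundary constraints.
 */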
static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction,
			 struct dma_attrs *attrs)
{
	struct scatterlist *s, *outs, *segstart;
	unsigned long flags, handle, prot;
	dma_addr_t dma_next = 0, dma_addr;
	unsigned int max_seg_size;
	unsigned long seg_boundary_size;
	int outcount, incount, i;
	struct iommu *iommu;
	unsigned long base_shift;
	long err;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	if (nelems == 0 || !iommu)
		return 0;

	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_batch_start(dev, prot, ~0UL);

	max_seg_size = dma_get_max_seg_size(dev);
	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
	base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
	for_each_sg(sglist, s, nelems, i) {
		unsigned long paddr, npages, entry, out_entry = 0, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
		entry = iommu_range_alloc(dev, iommu, npages, &handle);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
				       " npages %lx\n", iommu, paddr, npages);
			goto iommu_map_failed;
		}

		iommu_batch_new_entry(entry);

		/* Convert entry to a dma_addr_t */
		dma_addr = iommu->page_table_map_base +
			(entry << IO_PAGE_SHIFT);
		dma_addr |= (s->offset & ~IO_PAGE_MASK);

		/* Insert into HW table */
		paddr &= IO_PAGE_MASK;
		while (npages--) {
			err = iommu_batch_add(paddr);
			if (unlikely(err < 0L))
				goto iommu_map_failed;
			paddr += IO_PAGE_SIZE;
		}

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if ((dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size) ||
			    (is_span_boundary(out_entry, base_shift,
					      seg_boundary_size, outs, s))) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
			} else {
				outs->dma_length += s->length;
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
			out_entry = entry;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;
	}

	err = iommu_batch_end();

	if (unlikely(err < 0L))
		goto iommu_map_failed;

	spin_unlock_irqrestore(&iommu->lock, flags);

	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	return outcount;

iommu_map_failed:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IO_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IO_PAGE_SIZE);
			iommu_range_free(iommu, vaddr, npages);
			/* XXX demap? XXX */
			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

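/* Unmap a scatterlist previously mapped by dma_4v_map_sg.  A zero
 * dma_length marks the end of the merged segments, so the walk may
 * terminate before nelems entries have been visited.
 */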
static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    struct dma_attrs *attrs)
{
	struct pci_pbm_info *pbm;
	struct scatterlist *sg;
	struct iommu *iommu;
	unsigned long flags;
	u32 devhandle;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;

	spin_lock_irqsave(&iommu->lock, flags);

	sg = sglist;
	while (nelems--) {
		dma_addr_t dma_handle = sg->dma_address;
		unsigned int len = sg->dma_length;
		unsigned long npages, entry;

		if (!len)
			break;
		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
		iommu_range_free(iommu, dma_handle, npages);

		entry = ((dma_handle - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		while (npages) {
			unsigned long num;

			num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
						    npages);
			entry += num;
			npages -= num;
		}

		sg = sg_next(sg);
	}

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_map_ops sun4v_dma_ops = {
	.alloc			= dma_4v_alloc_coherent,
	.free			= dma_4v_free_coherent,
	.map_page		= dma_4v_map_page,
	.unmap_page		= dma_4v_unmap_page,
	.map_sg			= dma_4v_map_sg,
	.unmap_sg		= dma_4v_unmap_sg,
};

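/* Drivers never call these handlers directly; once pci_sun4v_probe()
 * points the global sparc64 'dma_ops' at sun4v_dma_ops, the generic DMA
 * API dispatches here.  A hypothetical consumer sketch (names made up):
 *
 *	void *buf;
 *	dma_addr_t dba;
 *
 *	buf = dma_alloc_coherent(&pdev->dev, 8192, &dba, GFP_KERNEL);
 *	if (buf) {
 *		// ... lands in dma_4v_alloc_coherent() on sun4v ...
 *		dma_free_coherent(&pdev->dev, 8192, buf, dba);
 *	}
 */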
static void __devinit pci_sun4v_scan_bus(struct pci_pbm_info *pbm,
					 struct device *parent)
{
	struct property *prop;
	struct device_node *dp;

	dp = pbm->op->dev.of_node;
	prop = of_find_property(dp, "66mhz-capable", NULL);
	pbm->is_66mhz_capable = (prop != NULL);
	pbm->pci_bus = pci_scan_one_pbm(pbm, parent);

	/* XXX register error interrupt handlers XXX */
}

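/* Scan the IOTSB for mappings installed by the firmware (OBP).  Entries
 * whose target pages belong to the kernel's available memory are
 * demapped; anything else is kept and marked busy in the allocation
 * arena so it is never handed out.
 */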
static unsigned long __devinit probe_existing_entries(struct pci_pbm_info *pbm,
						      struct iommu *iommu)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long i, cnt = 0;
	u32 devhandle;

	devhandle = pbm->devhandle;
	for (i = 0; i < arena->limit; i++) {
		unsigned long ret, io_attrs, ra;

		ret = pci_sun4v_iommu_getmap(devhandle,
					     HV_PCI_TSBID(0, i),
					     &io_attrs, &ra);
		if (ret == HV_EOK) {
			if (page_in_phys_avail(ra)) {
				pci_sun4v_iommu_demap(devhandle,
						      HV_PCI_TSBID(0, i), 1);
			} else {
				cnt++;
				__set_bit(i, arena->map);
			}
		}
	}

	return cnt;
}

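/* Set up the software state for one PBM's IOMMU.  The "virtual-dma" OF
 * property supplies the DVMA base and size (defaulting to a 2GB window
 * at 0x80000000); the arena bitmap tracks which IOTSB entries are free.
 */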
static int __devinit pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
	static const u32 vdma_default[] = { 0x80000000, 0x80000000 };
	struct iommu *iommu = pbm->iommu;
	unsigned long num_tsb_entries, sz;
	u32 dma_mask, dma_offset;
	const u32 *vdma;

	vdma = of_get_property(pbm->op->dev.of_node, "virtual-dma", NULL);
	if (!vdma)
		vdma = vdma_default;

	if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
		printk(KERN_ERR PFX "Strange virtual-dma[%08x:%08x].\n",
		       vdma[0], vdma[1]);
		return -EINVAL;
	}

	dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
	num_tsb_entries = vdma[1] / IO_PAGE_SIZE;

	dma_offset = vdma[0];

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_mask;

	/* Allocate and initialize the free area map. */
	sz = (num_tsb_entries + 7) / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kzalloc(sz, GFP_KERNEL);
	if (!iommu->arena.map) {
		printk(KERN_ERR PFX "Error, kmalloc(arena.map) failed.\n");
		return -ENOMEM;
	}
	iommu->arena.limit = num_tsb_entries;

	sz = probe_existing_entries(pbm, iommu);
	if (sz)
		printk("%s: Imported %lu TSB entries from OBP\n",
		       pbm->name, sz);

	return 0;
}

#ifdef CONFIG_PCI_MSI
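/* sun4v MSI support.  The hypervisor delivers MSI/MSI-X interrupts
 * through per-PBM event queues (MSIQs); each queue entry below mirrors
 * the layout defined by the sun4v hypervisor API.
 */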
struct pci_sun4v_msiq_entry {
	u64		version_type;
#define MSIQ_VERSION_MASK		0xffffffff00000000UL
#define MSIQ_VERSION_SHIFT		32
#define MSIQ_TYPE_MASK			0x00000000000000ffUL
#define MSIQ_TYPE_SHIFT			0
#define MSIQ_TYPE_NONE			0x00
#define MSIQ_TYPE_MSG			0x01
#define MSIQ_TYPE_MSI32			0x02
#define MSIQ_TYPE_MSI64			0x03
#define MSIQ_TYPE_INTX			0x08
#define MSIQ_TYPE_NONE2			0xff

	u64		intx_sysino;
	u64		reserved1;
	u64		stick;
	u64		req_id;  /* bus/device/func */
#define MSIQ_REQID_BUS_MASK		0xff00UL
#define MSIQ_REQID_BUS_SHIFT		8
#define MSIQ_REQID_DEVICE_MASK		0x00f8UL
#define MSIQ_REQID_DEVICE_SHIFT		3
#define MSIQ_REQID_FUNC_MASK		0x0007UL
#define MSIQ_REQID_FUNC_SHIFT		0

	u64		msi_address;

	/* The format of this value is message type dependent.
	 * For MSI bits 15:0 are the data from the MSI packet.
	 * For MSI-X bits 31:0 are the data from the MSI packet.
	 * For MSG, the message code and message routing code where:
	 *	bits 39:32 is the bus/device/fn of the msg target-id
	 *	bits 18:16 is the message routing code
	 *	bits 7:0 is the message code
	 * For INTx the low order 2-bits are:
	 *	00 - INTA
	 *	01 - INTB
	 *	10 - INTC
	 *	11 - INTD
	 */
	u64		msi_data;

	u64		reserved2;
};

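/* MSIQ head/tail bookkeeping uses byte offsets into the queue, so the
 * head returned by the hypervisor is validated against the queue size
 * in bytes before it is used to index the entries.
 */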
static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long *head)
{
	unsigned long err, limit;

	err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -ENXIO;

	limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	if (unlikely(*head >= limit))
		return -EFBIG;

	return 0;
}

static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
				 unsigned long msiqid, unsigned long *head,
				 unsigned long *msi)
{
	struct pci_sun4v_msiq_entry *ep;
	unsigned long err, type;

	/* Note: void pointer arithmetic, 'head' is a byte offset. */
	ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
				 (pbm->msiq_ent_count *
				  sizeof(struct pci_sun4v_msiq_entry))) +
	      *head);

	if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
		return 0;

	type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
	if (unlikely(type != MSIQ_TYPE_MSI32 &&
		     type != MSIQ_TYPE_MSI64))
		return -EINVAL;

	*msi = ep->msi_data;

	err = pci_sun4v_msi_setstate(pbm->devhandle,
				     ep->msi_data /* msi_num */,
				     HV_MSISTATE_IDLE);
	if (unlikely(err))
		return -ENXIO;

	/* Clear the entry. */
	ep->version_type &= ~MSIQ_TYPE_MASK;

	(*head) += sizeof(struct pci_sun4v_msiq_entry);
	if (*head >=
	    (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
		*head = 0;

	return 1;
}

static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long head)
{
	unsigned long err;

	err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -EINVAL;

	return 0;
}

static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
			       unsigned long msi, int is_msi64)
{
	if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
				  (is_msi64 ?
				   HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
		return -ENXIO;
	if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
		return -ENXIO;
	if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
		return -ENXIO;
	return 0;
}

static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
	unsigned long err, msiqid;

	err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
	if (err)
		return -ENXIO;

	pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);

	return 0;
}

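/* All MSIQs for a PBM are carved out of one physically contiguous,
 * zeroed allocation.  Each queue is registered with the hypervisor and
 * then read back to verify the configuration actually stuck.
 */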
static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);
	pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
	if (pages == 0UL) {
		printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
		       order);
		return -ENOMEM;
	}
	memset((char *)pages, 0, PAGE_SIZE << order);
	pbm->msi_queues = (void *) pages;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long err, base = __pa(pages + (i * q_size));
		unsigned long ret1, ret2;

		err = pci_sun4v_msiq_conf(pbm->devhandle,
					  pbm->msiq_first + i,
					  base, pbm->msiq_ent_count);
		if (err) {
			printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
			       err);
			goto h_error;
		}

		err = pci_sun4v_msiq_info(pbm->devhandle,
					  pbm->msiq_first + i,
					  &ret1, &ret2);
		if (err) {
			printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
			       err);
			goto h_error;
		}
		if (ret1 != base || ret2 != pbm->msiq_ent_count) {
			printk(KERN_ERR "MSI: Bogus qconf "
			       "expected[%lx:%x] got[%lx:%lx]\n",
			       base, pbm->msiq_ent_count,
			       ret1, ret2);
			goto h_error;
		}
	}

	return 0;

h_error:
	free_pages(pages, order);
	return -EINVAL;
}

static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long msiqid = pbm->msiq_first + i;

		(void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
	}

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);

	pages = (unsigned long) pbm->msi_queues;

	free_pages(pages, order);

	pbm->msi_queues = NULL;
}

static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
				    unsigned long msiqid,
				    unsigned long devino)
{
	unsigned int irq = sun4v_build_irq(pbm->devhandle, devino);

	if (!irq)
		return -ENOMEM;

	if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
		return -EINVAL;
	if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
		return -EINVAL;

	return irq;
}

static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
	.get_head	= pci_sun4v_get_head,
	.dequeue_msi	= pci_sun4v_dequeue_msi,
	.set_head	= pci_sun4v_set_head,
	.msi_setup	= pci_sun4v_msi_setup,
	.msi_teardown	= pci_sun4v_msi_teardown,
	.msiq_alloc	= pci_sun4v_msiq_alloc,
	.msiq_free	= pci_sun4v_msiq_free,
	.msiq_build_irq	= pci_sun4v_msiq_build_irq,
};

static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
	sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */

static int __devinit pci_sun4v_pbm_init(struct pci_pbm_info *pbm,
					struct platform_device *op, u32 devhandle)
{
	struct device_node *dp = op->dev.of_node;
	int err;

	pbm->numa_node = of_node_to_nid(dp);

	pbm->pci_ops = &sun4v_pci_ops;
	pbm->config_space_reg_bits = 12;

	pbm->index = pci_num_pbms++;

	pbm->op = op;

	pbm->devhandle = devhandle;

	pbm->name = dp->full_name;

	printk("%s: SUN4V PCI Bus Module\n", pbm->name);
	printk("%s: On NUMA node %d\n", pbm->name, pbm->numa_node);

	pci_determine_mem_io_space(pbm);

	pci_get_pbm_props(pbm);

	err = pci_sun4v_iommu_init(pbm);
	if (err)
		return err;

	pci_sun4v_msi_init(pbm);

	pci_sun4v_scan_bus(pbm, &op->dev);

	pbm->next = pci_pbm_root;
	pci_pbm_root = pbm;

	return 0;
}

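/* Probe entry point.  The first invocation negotiates the PCI hypervisor
 * API group and, on success, switches the global sparc64 dma_ops over to
 * the sun4v implementation and allocates the per-cpu batching pglists.
 */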
static int __devinit pci_sun4v_probe(struct platform_device *op)
{
	const struct linux_prom64_registers *regs;
	static int hvapi_negotiated = 0;
	struct pci_pbm_info *pbm;
	struct device_node *dp;
	struct iommu *iommu;
	u32 devhandle;
	int i, err;

	dp = op->dev.of_node;

	if (!hvapi_negotiated++) {
		err = sun4v_hvapi_register(HV_GRP_PCI,
					   vpci_major,
					   &vpci_minor);

		if (err) {
			printk(KERN_ERR PFX "Could not register hvapi, "
			       "err=%d\n", err);
			return err;
		}
		printk(KERN_INFO PFX "Registered hvapi major[%lu] minor[%lu]\n",
		       vpci_major, vpci_minor);

		dma_ops = &sun4v_dma_ops;
	}

	regs = of_get_property(dp, "reg", NULL);
	err = -ENODEV;
	if (!regs) {
		printk(KERN_ERR PFX "Could not find config registers\n");
		goto out_err;
	}
	devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	err = -ENOMEM;
	if (!iommu_batch_initialized) {
		for_each_possible_cpu(i) {
			unsigned long page = get_zeroed_page(GFP_KERNEL);

			if (!page)
				goto out_err;

			per_cpu(iommu_batch, i).pglist = (u64 *) page;
		}
		iommu_batch_initialized = 1;
	}

	pbm = kzalloc(sizeof(*pbm), GFP_KERNEL);
	if (!pbm) {
		printk(KERN_ERR PFX "Could not allocate pci_pbm_info\n");
		goto out_err;
	}

	iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL);
	if (!iommu) {
		printk(KERN_ERR PFX "Could not allocate pbm iommu\n");
		goto out_free_controller;
	}

	pbm->iommu = iommu;

	err = pci_sun4v_pbm_init(pbm, op, devhandle);
	if (err)
		goto out_free_iommu;

	dev_set_drvdata(&op->dev, pbm);

	return 0;

out_free_iommu:
	kfree(pbm->iommu);

out_free_controller:
	kfree(pbm);

out_err:
	return err;
}

static const struct of_device_id pci_sun4v_match[] = {
	{
		.name = "pci",
		.compatible = "SUNW,sun4v-pci",
	},
	{},
};

static struct platform_driver pci_sun4v_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.owner = THIS_MODULE,
		.of_match_table = pci_sun4v_match,
	},
	.probe		= pci_sun4v_probe,
};

static int __init pci_sun4v_init(void)
{
	return platform_driver_register(&pci_sun4v_driver);
}

subsys_initcall(pci_sun4v_init);