6 #include <linux/kernel.h>
7 #include <linux/types.h>
10 #include <linux/slab.h>
15 #include <linux/export.h>
19 #include <asm/iommu.h>
21 #include <asm/hypervisor.h>
/* Identification strings used as the printk/log prefix for this driver. */
29 #define DRIVER_NAME "pci_sun4v"
30 #define PFX DRIVER_NAME ": "
/*
 * Requested VPCI hypervisor API group version (major.minor).
 * NOTE(review): these appear to be passed to the one-time hypervisor API
 * negotiation done in the probe path — confirm against sun4v_hvapi_register()
 * usage, which is not visible in this fragment.
 */
32 static unsigned long vpci_major = 1;
33 static unsigned long vpci_minor = 1;
/* Number of u64 IOTTE physical-page entries that fit in one page-sized list. */
35 #define PGLIST_NENTS (PAGE_SIZE / sizeof(u64))
/* One-shot flag: set once the per-CPU IOMMU batching state has been set up. */
46 static int iommu_batch_initialized;
49 static inline void iommu_batch_start(
struct device *
dev,
unsigned long prot,
unsigned long entry)
64 unsigned long prot = p->
prot;
67 unsigned long npages = p->
npages;
73 npages, prot,
__pa(pglist));
75 if (printk_ratelimit())
76 printk(
"iommu_batch_flush: IOMMU map of "
77 "[%08lx:%08llx:%lx:%lx:%lx] failed with "
80 npages, prot,
__pa(pglist), num);
95 static inline void iommu_batch_new_entry(
unsigned long entry)
102 iommu_batch_flush(p);
107 static inline long iommu_batch_add(
u64 phys_page)
115 return iommu_batch_flush(p);
121 static inline long iommu_batch_end(
void)
127 return iommu_batch_flush(p);
130 static void *dma_4v_alloc_coherent(
struct device *
dev,
size_t size,
149 page = alloc_pages_node(nid, gfp, order);
160 spin_unlock_irqrestore(&iommu->
lock, flags);
163 goto range_alloc_fail;
167 ret = (
void *) first_page;
168 first_page =
__pa(first_page);
172 iommu_batch_start(dev,
177 for (n = 0; n < npages; n++) {
178 long err = iommu_batch_add(first_page + (n *
PAGE_SIZE));
183 if (
unlikely(iommu_batch_end() < 0L))
192 spin_lock(&iommu->
lock);
194 spin_unlock_irqrestore(&iommu->
lock, flags);
201 static void dma_4v_free_coherent(
struct device *dev,
size_t size,
void *
cpu,
211 pbm = dev->
archdata.host_controller;
226 }
while (npages != 0);
228 spin_unlock_irqrestore(&iommu->
lock, flags);
236 unsigned long offset,
size_t sz,
241 unsigned long flags, npages, oaddr;
242 unsigned long i, base_paddr;
258 spin_unlock_irqrestore(&iommu->
lock, flags);
273 iommu_batch_start(dev, prot, entry);
275 for (i = 0; i < npages; i++, base_paddr +=
IO_PAGE_SIZE) {
276 long err = iommu_batch_add(base_paddr);
280 if (
unlikely(iommu_batch_end() < 0L))
288 if (printk_ratelimit())
294 spin_lock(&iommu->
lock);
296 spin_unlock_irqrestore(&iommu->
lock, flags);
307 unsigned long flags, npages;
312 if (printk_ratelimit())
318 pbm = dev->
archdata.host_controller;
337 }
while (npages != 0);
339 spin_unlock_irqrestore(&iommu->
lock, flags);
349 unsigned int max_seg_size;
350 unsigned long seg_boundary_size;
351 int outcount, incount,
i;
353 unsigned long base_shift;
359 if (nelems == 0 || !iommu)
366 outs = s = segstart = &sglist[0];
372 outs->dma_length = 0;
376 iommu_batch_start(dev, prot, ~0
UL);
378 max_seg_size = dma_get_max_seg_size(dev);
379 seg_boundary_size =
ALIGN(dma_get_seg_boundary(dev) + 1,
383 unsigned long paddr, npages,
entry, out_entry = 0, slen;
398 if (printk_ratelimit())
400 " npages %lx\n", iommu, paddr, npages);
401 goto iommu_map_failed;
404 iommu_batch_new_entry(entry);
414 err = iommu_batch_add(paddr);
416 goto iommu_map_failed;
426 (outs->dma_length + s->
length > max_seg_size) ||
427 (is_span_boundary(out_entry, base_shift,
428 seg_boundary_size, outs, s))) {
434 outs->dma_length += s->
length;
441 outs->dma_length = slen;
449 err = iommu_batch_end();
452 goto iommu_map_failed;
454 spin_unlock_irqrestore(&iommu->
lock, flags);
456 if (outcount < incount) {
459 outs->dma_length = 0;
466 if (s->dma_length != 0) {
467 unsigned long vaddr, npages;
470 npages = iommu_num_pages(s->
dma_address, s->dma_length,
480 spin_unlock_irqrestore(&iommu->
lock, flags);
498 pbm = dev->
archdata.host_controller;
506 unsigned int len = sg->dma_length;
507 unsigned long npages,
entry;
511 npages = iommu_num_pages(dma_handle, len,
IO_PAGE_SIZE);
527 spin_unlock_irqrestore(&iommu->
lock, flags);
531 .alloc = dma_4v_alloc_coherent,
532 .free = dma_4v_free_coherent,
533 .map_page = dma_4v_map_page,
534 .unmap_page = dma_4v_unmap_page,
535 .map_sg = dma_4v_map_sg,
536 .unmap_sg = dma_4v_unmap_sg,
545 dp = pbm->
op->dev.of_node;
557 unsigned long i,
cnt = 0;
561 for (i = 0; i < arena->
limit; i++) {
583 static const u32 vdma_default[] = { 0x80000000, 0x80000000 };
584 struct iommu *iommu = pbm->
iommu;
585 unsigned long num_tsb_entries, sz;
586 u32 dma_mask, dma_offset;
602 dma_offset = vdma[0];
611 sz = (num_tsb_entries + 7) / 8;
612 sz = (sz + 7
UL) & ~7
UL;
614 if (!iommu->
arena.map) {
618 iommu->
arena.limit = num_tsb_entries;
620 sz = probe_existing_entries(pbm, iommu);
622 printk(
"%s: Imported %lu TSB entries from OBP\n",
628 #ifdef CONFIG_PCI_MSI
629 struct pci_sun4v_msiq_entry {
/*
 * Bitfield layout of the 64-bit version_type word of an MSI event queue
 * entry (struct pci_sun4v_msiq_entry): version in the upper 32 bits,
 * entry type in the low byte.
 * NOTE(review): values presumably follow the sun4v hypervisor MSI EQ
 * format — confirm against the Hypervisor API specification.
 */
631 #define MSIQ_VERSION_MASK 0xffffffff00000000UL
632 #define MSIQ_VERSION_SHIFT 32
633 #define MSIQ_TYPE_MASK 0x00000000000000ffUL
634 #define MSIQ_TYPE_SHIFT 0
/* Entry type codes: NONE/NONE2 mark unused slots (checked/cleared by the
 * dequeue path); MSG, 32-bit MSI, 64-bit MSI, and legacy INTx records. */
635 #define MSIQ_TYPE_NONE 0x00
636 #define MSIQ_TYPE_MSG 0x01
637 #define MSIQ_TYPE_MSI32 0x02
638 #define MSIQ_TYPE_MSI64 0x03
639 #define MSIQ_TYPE_INTX 0x08
640 #define MSIQ_TYPE_NONE2 0xff
/* Decode of the 16-bit PCI requester ID field of an EQ entry:
 * bus[15:8], device[7:3], function[2:0]. */
646 #define MSIQ_REQID_BUS_MASK 0xff00UL
647 #define MSIQ_REQID_BUS_SHIFT 8
648 #define MSIQ_REQID_DEVICE_MASK 0x00f8UL
649 #define MSIQ_REQID_DEVICE_SHIFT 3
650 #define MSIQ_REQID_FUNC_MASK 0x0007UL
651 #define MSIQ_REQID_FUNC_SHIFT 0
673 static int pci_sun4v_get_head(
struct pci_pbm_info *pbm,
unsigned long msiqid,
682 limit = pbm->msiq_ent_count *
sizeof(
struct pci_sun4v_msiq_entry);
689 static int pci_sun4v_dequeue_msi(
struct pci_pbm_info *pbm,
690 unsigned long msiqid,
unsigned long *head,
693 struct pci_sun4v_msiq_entry *ep;
697 ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
698 (pbm->msiq_ent_count *
699 sizeof(
struct pci_sun4v_msiq_entry))) +
702 if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
705 type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
706 if (
unlikely(type != MSIQ_TYPE_MSI32 &&
707 type != MSIQ_TYPE_MSI64))
719 ep->version_type &= ~MSIQ_TYPE_MASK;
721 (*head) +=
sizeof(
struct pci_sun4v_msiq_entry);
723 (pbm->msiq_ent_count *
sizeof(
struct pci_sun4v_msiq_entry)))
729 static int pci_sun4v_set_head(
struct pci_pbm_info *pbm,
unsigned long msiqid,
741 static int pci_sun4v_msi_setup(
struct pci_pbm_info *pbm,
unsigned long msiqid,
742 unsigned long msi,
int is_msi64)
755 static int pci_sun4v_msi_teardown(
struct pci_pbm_info *pbm,
unsigned long msi)
757 unsigned long err, msiqid;
768 static int pci_sun4v_msiq_alloc(
struct pci_pbm_info *pbm)
770 unsigned long q_size, alloc_size,
pages,
order;
773 q_size = pbm->msiq_ent_count *
sizeof(
struct pci_sun4v_msiq_entry);
774 alloc_size = (pbm->msiq_num * q_size);
783 pbm->msi_queues = (
void *) pages;
785 for (i = 0; i < pbm->msiq_num; i++) {
786 unsigned long err,
base =
__pa(pages + (i * q_size));
787 unsigned long ret1, ret2;
791 base, pbm->msiq_ent_count);
806 if (ret1 != base || ret2 != pbm->msiq_ent_count) {
808 "expected[%lx:%x] got[%lx:%lx]\n",
809 base, pbm->msiq_ent_count,
822 static void pci_sun4v_msiq_free(
struct pci_pbm_info *pbm)
824 unsigned long q_size, alloc_size,
pages,
order;
827 for (i = 0; i < pbm->msiq_num; i++) {
828 unsigned long msiqid = pbm->msiq_first +
i;
833 q_size = pbm->msiq_ent_count *
sizeof(
struct pci_sun4v_msiq_entry);
834 alloc_size = (pbm->msiq_num * q_size);
837 pages = (
unsigned long) pbm->msi_queues;
841 pbm->msi_queues =
NULL;
844 static int pci_sun4v_msiq_build_irq(
struct pci_pbm_info *pbm,
845 unsigned long msiqid,
846 unsigned long devino)
861 static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
862 .get_head = pci_sun4v_get_head,
863 .dequeue_msi = pci_sun4v_dequeue_msi,
864 .set_head = pci_sun4v_set_head,
865 .msi_setup = pci_sun4v_msi_setup,
866 .msi_teardown = pci_sun4v_msi_teardown,
867 .msiq_alloc = pci_sun4v_msiq_alloc,
868 .msiq_free = pci_sun4v_msiq_free,
869 .msiq_build_irq = pci_sun4v_msiq_build_irq,
872 static void pci_sun4v_msi_init(
struct pci_pbm_info *pbm)
877 static void pci_sun4v_msi_init(
struct pci_pbm_info *pbm)
901 printk(
"%s: SUN4V PCI Bus Module\n", pbm->
name);
908 err = pci_sun4v_iommu_init(pbm);
912 pci_sun4v_msi_init(pbm);
914 pci_sun4v_scan_bus(pbm, &op->
dev);
925 static int hvapi_negotiated = 0;
932 dp = op->
dev.of_node;
934 if (!hvapi_negotiated++) {
945 vpci_major, vpci_minor);
956 devhandle = (regs->
phys_addr >> 32
UL) & 0x0fffffff;
959 if (!iommu_batch_initialized) {
968 iommu_batch_initialized = 1;
977 iommu = kzalloc(
sizeof(
struct iommu),
GFP_KERNEL);
980 goto out_free_controller;
985 err = pci_sun4v_pbm_init(pbm, op, devhandle);
1006 .compatible =
"SUNW,sun4v-pci",
1015 .of_match_table = pci_sun4v_match,
1017 .probe = pci_sun4v_probe,
1020 static int __init pci_sun4v_init(
void)