10 #include <linux/kernel.h>
13 #include <linux/slab.h>
19 #include <asm/pgalloc.h>
20 #include <asm/pgtable.h>
24 #include <asm/cacheflush.h>
25 #include <asm/tlbflush.h>
27 #include <asm/iommu.h>
/*
 * DVMA window geometry: a 256 MB IOMMU aperture starting at DVMA
 * address 0xF0000000, covered by IOMMU_NPTES page-table entries
 * (one IOPTE per PAGE_SIZE page).
 */
34 #define IOMMU_RNGE IOMMU_RNGE_256MB
35 #define IOMMU_START 0xF0000000
36 #define IOMMU_WINSIZE (256*1024*1024U)
37 #define IOMMU_NPTES (IOMMU_WINSIZE/PAGE_SIZE)
/*
 * NOTE(review): the code that sets these flags is elided in this extract.
 * viking_flush presumably selects a Viking-MMU-specific flush path in
 * iommu_flush_iotlb() (see its "else if (viking_flush)" branch) -- confirm.
 * ioperm_noc looks like a non-cacheable variant of IOPERM -- confirm.
 */
43 static int viking_flush;
51 static unsigned int ioperm_noc;
/* Default IOPTE permission bits: cacheable, writable, valid. */
54 #define IOPERM (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
/*
 * Build an IOPTE: shift the pfn into the IOPTE_PAGE field, OR in the
 * permission bits, and clear the IOPTE_WAZ bits.
 * NOTE(review): the "write-as-zero" reading of IOPTE_WAZ is inferred from
 * the name only -- confirm against asm/iommu.h.
 */
55 #define MKIOPTE(pfn, perm) (((((pfn)<<8) & IOPTE_PAGE) | (perm)) & ~IOPTE_WAZ)
/*
 * NOTE(review): interior of the per-device IOMMU init routine; most lines
 * are elided in this extract.  Visible steps: allocate the iommu
 * descriptor, read the control register to get impl/vers, invalidate the
 * IOMMU TLB, set the window end, allocate the IOPTE table and the
 * allocation bitmap (each failure reported via prom_printf), use a
 * single-color usemap, log the configuration, and attach the descriptor
 * to op->dev.archdata.iommu.
 */
60 unsigned int impl, vers;
66 prom_printf(
"Unable to allocate iommu structure\n");
/* impl/vers are presumably decoded from this control-register read. */
78 tmp = iommu->
regs->control;
82 iommu_invalidate(iommu->
regs);
84 iommu->
end = 0xffffffff;
93 prom_printf(
"Unable to allocate iommu table [0x%lx]\n",
104 iommu_invalidate(iommu->
regs);
108 prom_printf(
"Unable to allocate iommu bitmap [%d]\n",
/* Single-color bitmap: no DVMA coloring constraints for this IOMMU. */
119 iommu->
usemap.num_colors = 1;
121 printk(
KERN_INFO "IOMMU: impl %d vers %d table 0x%p[%d B] map [%d b]\n",
125 op->
dev.archdata.iommu = iommu;
/*
 * iommu_init(): walks the device-tree nodes named "iommu".
 * NOTE(review): body largely elided in this extract; presumably calls the
 * per-device init routine for each node found -- confirm.
 */
128 static int __init iommu_init(
void)
132 for_each_node_by_name(dp,
"iommu") {
/*
 * iommu_flush_iotlb(): make `niopte` newly written IOPTEs starting at
 * `iopte` visible to the IOMMU.  Takes a distinct path when viking_flush
 * is set (Viking MMU); the other branches are elided in this extract.
 */
146 static void iommu_flush_iotlb(
iopte_t *iopte,
unsigned int niopte)
151 start = (
unsigned long)iopte;
159 }
else if (viking_flush) {
/*
 * NOTE(review): interior of the routine that maps npages into the DVMA
 * window (presumably iommu_get_one, per the caller visible below).
 * Visible steps: for each page, invalidate the IOMMU TLB entry for its
 * bus address, then flush the freshly written IOPTEs.
 */
177 unsigned int busa, busa0;
189 for (i = 0; i < npages; i++) {
191 iommu_invalidate_page(iommu->
regs, busa);
197 iommu_flush_iotlb(iopte0, npages);
/*
 * iommu_get_scsi_one(): map a single CPU-virtual buffer of `len` bytes
 * for DMA, returning a DVMA bus address.  The page/npages computation is
 * elided in this extract; the mapping itself is delegated to
 * iommu_get_one().
 */
202 static u32 iommu_get_scsi_one(
struct device *dev,
char *
vaddr,
unsigned int len)
212 busa = iommu_get_one(dev, page, npages);
/*
 * "gflush" variant of the single-buffer mapper.
 * NOTE(review): the pre-mapping global cache flush implied by the name is
 * elided in this extract -- confirm.  Visible behavior delegates to
 * iommu_get_scsi_one().
 */
216 static __u32 iommu_get_scsi_one_gflush(
struct device *dev,
char *vaddr,
unsigned long len)
219 return iommu_get_scsi_one(dev, vaddr, len);
/*
 * "pflush" variant of the single-buffer mapper: walks the buffer page by
 * page (the per-page flush inside the loop is elided in this extract)
 * before delegating to iommu_get_scsi_one().
 */
222 static __u32 iommu_get_scsi_one_pflush(
struct device *dev,
char *vaddr,
unsigned long len)
226 while(page < ((
unsigned long)(vaddr + len))) {
230 return iommu_get_scsi_one(dev, vaddr, len);
/*
 * NOTE(review): fragment of a scatterlist-mapping loop (presumably the
 * gflush sgl variant, per the ops table below): the DMA length of each
 * segment mirrors its CPU length.
 */
242 sg->dma_length = sg->
length;
/*
 * iommu_get_scsi_sgl_pflush(): map `sz` scatterlist segments with
 * per-page cache flushing.  For each segment that has a kernel mapping
 * (page_address() != 0 -- presumably skipping highmem pages; confirm),
 * each page is flushed, with `oldpage` tracked to avoid flushing the
 * same page twice in a row.  The flush call itself and the mapping of
 * the segment are elided in this extract; the segment's DMA length is
 * set to its CPU length.
 */
247 static void iommu_get_scsi_sgl_pflush(
struct device *dev,
struct scatterlist *sg,
int sz)
249 unsigned long page, oldpage = 0;
262 if ((page = (
unsigned long)
page_address(sg_page(sg))) != 0) {
263 for (i = 0; i <
n; i++) {
264 if (page != oldpage) {
273 sg->dma_length = sg->
length;
/*
 * iommu_release_one(): tear down a DVMA mapping of `npages` pages at bus
 * address `busa`.  Sanity-checks that busa lies inside the IOMMU window,
 * then invalidates the IOMMU TLB entry for each page.  The IOPTE
 * clearing and bitmap release are elided in this extract.
 */
278 static void iommu_release_one(
struct device *dev,
u32 busa,
int npages)
284 BUG_ON(busa < iommu->start);
286 for (i = 0; i < npages; i++) {
288 iommu_invalidate_page(iommu->
regs, busa);
/*
 * iommu_release_scsi_one(): release the mapping created by the
 * get_scsi_one path -- page-aligns the DVMA address and delegates to
 * iommu_release_one() (the npages computation is elided in this extract).
 */
294 static void iommu_release_scsi_one(
struct device *dev,
__u32 vaddr,
unsigned long len)
301 iommu_release_one(dev, vaddr &
PAGE_MASK, npages);
/*
 * iommu_release_scsi_sgl(): release the mappings of `sz` scatterlist
 * segments.  Body elided in this extract; presumably loops over the
 * segments calling iommu_release_one() -- confirm.
 */
304 static void iommu_release_scsi_sgl(
struct device *dev,
struct scatterlist *sg,
int sz)
/*
 * NOTE(review): fragment of a DMA-area mapper (presumably
 * iommu_map_dma_area, which the ops tables below bind).  Signature start
 * and most of the body are elided; visible pieces are a viking_flush
 * branch and a final whole-IOMMU TLB invalidate.
 */
320 unsigned long addr,
int len)
350 else if (viking_flush)
380 iommu_invalidate(iommu->
regs);
/*
 * iommu_unmap_dma_area(): tear down a consistent DMA area of `len` bytes
 * at bus address `busa`.  The IOPTE clearing is elided in this extract;
 * the routine ends with a whole-IOMMU TLB invalidate.
 */
386 static void iommu_unmap_dma_area(
struct device *dev,
unsigned long busa,
int len)
403 iommu_invalidate(iommu->
regs);
/*
 * NOTE(review): interior of the "gflush" dma-ops table (declaration line
 * elided): whole-cache-flush get_scsi_* variants, shared release and
 * map/unmap-dma-area handlers.
 */
409 .get_scsi_one = iommu_get_scsi_one_gflush,
410 .get_scsi_sgl = iommu_get_scsi_sgl_gflush,
411 .release_scsi_one = iommu_release_scsi_one,
412 .release_scsi_sgl = iommu_release_scsi_sgl,
414 .map_dma_area = iommu_map_dma_area,
415 .unmap_dma_area = iommu_unmap_dma_area,
/*
 * NOTE(review): interior of the "pflush" dma-ops table (declaration line
 * elided): per-page-flush get_scsi_* variants; release and
 * map/unmap-dma-area handlers are the same ones the gflush table uses.
 */
420 .get_scsi_one = iommu_get_scsi_one_pflush,
421 .get_scsi_sgl = iommu_get_scsi_sgl_pflush,
422 .release_scsi_one = iommu_release_scsi_one,
423 .release_scsi_sgl = iommu_release_scsi_sgl,
425 .map_dma_area = iommu_map_dma_area,
426 .unmap_dma_area = iommu_unmap_dma_area,