#include <linux/kernel.h>
#include <linux/export.h>
#if DEBUG_ALLOC > 0
# define DBGA(args...)	printk(KERN_DEBUG args)
#else
# define DBGA(args...)
#endif
#if DEBUG_ALLOC > 1
# define DBGA2(args...)	printk(KERN_DEBUG args)
#else
# define DBGA2(args...)
#endif
#define DEBUG_NODIRECT 0
#define ISA_DMA_MASK		0x00ffffff
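/* mk_iommu_pte() turns a CPU physical address into the PTE format used by
   the scatter-gather (IOMMU) window. */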
static inline unsigned long
mk_iommu_pte(unsigned long paddr)
		     unsigned long window_size, unsigned long align)
#ifdef CONFIG_DISCONTIGMEM
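	/* On DISCONTIGMEM configurations the arena and its page table are
	   allocated node-locally first; the messages below cover the
	   fallback to a system-wide allocation. */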
		printk("%s: couldn't allocate arena from node %d\n"
		       "    falling back to system-wide allocation\n",
		printk("%s: couldn't allocate arena ptes from node %d\n"
		       "    falling back to system-wide allocation\n",
	arena->size = window_size;
		unsigned long window_size, unsigned long align)
	unsigned long boundary_size;
		boundary_size = dma_get_seg_boundary(dev) + 1;
	while (i < n && p+i < nent) {
			p = ALIGN(p + 1, mask + 1);
			p = ALIGN(p + i + 1, mask + 1), i = 0;
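	/* The search wrapped past the end of the arena; flush the whole
	   IOMMU TLB before the search is retried from the start of the
	   window. */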
	alpha_mv.mv_pci_tbi(arena->hose, 0, -1);
	p = iommu_arena_find_pages(dev, arena, n, mask);
		spin_unlock_irqrestore(&arena->lock, flags);
	for (i = 0; i < n; ++i)
	spin_unlock_irqrestore(&arena->lock, flags);
	p = arena->ptes + ofs;
	for (i = 0; i < n; ++i)
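/* DAC (dual address cycle) addressing lets a 64-bit-capable device reach
   all of memory directly, bypassing the scatter-gather window. */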
static int pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
{
	dma_addr_t dac_offset = alpha_mv.pci_dac_offset;
	if ((dac_offset & dev->dma_mask) != dac_offset)
	DBGA("pci_dac_dma_supported %s from %pf\n",
	     ok ? "yes" : "no", __builtin_return_address(0));
	long npages, dma_ofs, i;
	unsigned int align = 0;
	paddr = __pa(cpu_addr);
		DBGA2("pci_map_single: [%p,%zx] -> direct %llx from %pf\n",
		      cpu_addr, size, ret, __builtin_return_address(0));
		ret = paddr + alpha_mv.pci_dac_offset;
		DBGA2("pci_map_single: [%p,%zx] -> DAC %llx from %pf\n",
		      cpu_addr, size, ret, __builtin_return_address(0));
	if (! alpha_mv.mv_pci_tbi) {
	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;
	npages = iommu_num_pages(paddr, size, PAGE_SIZE);
	dma_ofs = iommu_arena_alloc(dev, arena, npages, align);
		       "could not allocate dma page tables\n");
	for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
		arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);
	DBGA2("pci_map_single: [%p,%zx] np %ld -> sg %llx from %pf\n",
	      cpu_addr, size, npages, ret, __builtin_return_address(0));
static struct pci_dev *alpha_gendev_to_pci(struct device *dev)
				     unsigned long offset, size_t size,
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
	return pci_map_single_1(pdev, (char *)page_address(page) + offset,
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	long dma_ofs, npages;
	if (dma_addr >= __direct_map_base
		DBGA2("pci_unmap_single: direct [%llx,%zx] from %pf\n",
		      dma_addr, size, __builtin_return_address(0));
	if (dma_addr > 0xffffffff) {
		DBGA2("pci64_unmap_single: DAC [%llx,%zx] from %pf\n",
		      dma_addr, size, __builtin_return_address(0));
	arena = hose->sg_pci;
	if (!arena || dma_addr < arena->dma_base)
		arena = hose->sg_isa;
	if (dma_ofs * PAGE_SIZE >= arena->size) {
		       " base %llx size %x\n",
	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	iommu_arena_free(arena, dma_ofs, npages);
		alpha_mv.mv_pci_tbi(hose, dma_addr, dma_addr + size - 1);
	spin_unlock_irqrestore(&arena->lock, flags);
	DBGA2("pci_unmap_single: sg [%llx,%zx] np %ld from %pf\n",
	      dma_addr, size, npages, __builtin_return_address(0));
static void *alpha_pci_alloc_coherent(struct device *dev, size_t size,
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
450 "get_free_pages failed from %pf\n",
451 __builtin_return_address(0));
456 memset(cpu_addr, 0, size);
458 *dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0);
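	/* If the mapping failed and no IOMMU is present (and GFP_DMA was
	   not already used), the allocation is retried from the ISA DMA
	   zone; otherwise it is a hard failure. */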
	if (*dma_addrp == 0) {
		if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA))
	DBGA2("pci_alloc_consistent: %zx -> [%p,%llx] from %pf\n",
	      size, cpu_addr, *dma_addrp, __builtin_return_address(0));
static void alpha_pci_free_coherent(struct device *dev, size_t size,
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	DBGA2("pci_free_consistent: [%llx,%zx] from %pf\n",
	      dma_addr, size, __builtin_return_address(0));
#define SG_ENT_VIRT_ADDRESS(SG) (sg_virt((SG)))
#define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG))
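/* sg_classify() coalesces physically or virtually contiguous scatterlist
   entries behind a single leader entry, tagging the followers with negative
   dma_address markers, so they can share one arena allocation. */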
	unsigned long next_paddr;
	long leader_flag, leader_length;
	unsigned int max_seg_size;
	leader_length = leader->length;
	max_seg_size = dev ? dma_get_max_seg_size(dev) : 0;
	for (++sg; sg < end; ++sg) {
		if (leader_length + len > max_seg_size)
		if (next_paddr == addr) {
			leader_length += len;
		} else if (((next_paddr | addr) & ~PAGE_MASK) == 0 && virt_ok) {
			leader_length += len;
			leader->dma_length = leader_length;
		next_paddr = addr + len;
	leader->dma_length = leader_length;
	long size = leader->dma_length;
	long npages, dma_ofs, i;
	    && paddr + size + __direct_map_base - 1 <= max_dma
		out->dma_length = size;
		DBGA("    sg_fill: [%p,%lx] -> direct %llx\n",
		out->dma_address = paddr + alpha_mv.pci_dac_offset;
		out->dma_length = size;
		DBGA("    sg_fill: [%p,%lx] -> DAC %llx\n",
	npages = iommu_num_pages(paddr, size, PAGE_SIZE);
	dma_ofs = iommu_arena_alloc(dev, arena, npages, 0);
		sg_classify(dev, leader, end, 0);
		return sg_fill(dev, leader, end, out, arena, max_dma, dac_allowed);
	out->dma_length = size;
	DBGA("    sg_fill: [%p,%lx] -> sg %llx np %ld\n",
	ptes = &arena->ptes[dma_ofs];
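	/* Followers tagged with dma_address == -1 by sg_classify() are
	   physically contiguous with the leader; fold them into one
	   mapping. */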
		while (sg+1 < end && (int) sg[1].dma_address == -1) {
		npages = iommu_num_pages(paddr, size, PAGE_SIZE);
		for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
			*ptes++ = mk_iommu_pte(paddr);
		DBGA("    (%ld) [%p,%x] np %ld\n",
		while (++last_sg <= sg) {
			DBGA("        (%ld) [%p,%x] cont\n",
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	dac_allowed = dev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
		sg->dma_length = sg->length;
	sg_classify(dev, sg, end, alpha_mv.mv_pci_tbi != 0);
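	/* Pick the scatter-gather arena that covers the device's DMA mask,
	   falling back to the ISA arena. */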
	if (alpha_mv.mv_pci_tbi) {
		arena = hose->sg_pci;
		if (!arena || arena->dma_base + arena->size - 1 > max_dma)
			arena = hose->sg_isa;
	for (out = sg; sg < end; ++sg) {
		if (sg_fill(dev, sg, end, out, arena, max_dma, dac_allowed) < 0)
	if (out - start == 0)
	DBGA("pci_map_sg: %ld entries\n", out - start);
	       "could not allocate dma page tables\n");
		pci_unmap_sg(pdev, start, out - start, dir);
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	if (! alpha_mv.mv_pci_tbi)
	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;
	for (end = sg + nents; sg < end; ++sg) {
		size = sg->dma_length;
		if (addr > 0xffffffff) {
			DBGA("    (%ld) DAC [%llx,%zx]\n",
			     sg - end + nents, addr, size);
		if (addr >= __direct_map_base
			DBGA("    (%ld) direct [%llx,%zx]\n",
			     sg - end + nents, addr, size);
		DBGA("    (%ld) sg [%llx,%zx]\n",
		     sg - end + nents, addr, size);
		npages = iommu_num_pages(addr, size, PAGE_SIZE);
		iommu_arena_free(arena, ofs, npages);
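		/* Track the overall range being freed so one TLB invalidate
		   can cover it below. */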
		tend = addr + size - 1;
		if (fbeg > addr) fbeg = addr;
		if (fend < tend) fend = tend;
		alpha_mv.mv_pci_tbi(hose, fbeg, fend);
	spin_unlock_irqrestore(&arena->lock, flags);
	DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg));
static int alpha_pci_supported(struct device *dev, u64 mask)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	arena = hose->sg_isa;
	arena = hose->sg_pci;
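/* Arena reservation helpers: iommu_reserve()/iommu_release() claim and
   return ranges of ptes, while iommu_bind()/iommu_unbind() point reserved
   ptes at real pages (used by the Alpha AGP GART code). */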
	if (!arena) return -EINVAL;
	p = iommu_arena_find_pages(NULL, arena, pg_count, align_mask);
		spin_unlock_irqrestore(&arena->lock, flags);
	for (i = 0; i < pg_count; ++i)
	spin_unlock_irqrestore(&arena->lock, flags);
	if (!arena) return -EINVAL;
	for(i = pg_start; i < pg_start + pg_count; i++)
	iommu_arena_free(arena, pg_start, pg_count);
	if (!arena) return -EINVAL;
	for(j = pg_start; j < pg_start + pg_count; j++) {
			spin_unlock_irqrestore(&arena->lock, flags);
	for(i = 0, j = pg_start; i < pg_count; i++, j++)
	spin_unlock_irqrestore(&arena->lock, flags);
	if (!arena) return -EINVAL;
	p = arena->ptes + pg_start;
	for(i = 0; i < pg_count; i++)
	return dma_addr == 0;
static int alpha_pci_set_mask(struct device *dev, u64 mask)
	    !pci_dma_supported(alpha_gendev_to_pci(dev), mask))
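/* dma_map_ops table wiring the routines above into the generic DMA API. */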
	.alloc			= alpha_pci_alloc_coherent,
	.free			= alpha_pci_free_coherent,
	.map_page		= alpha_pci_map_page,
	.unmap_page		= alpha_pci_unmap_page,
	.map_sg			= alpha_pci_map_sg,
	.unmap_sg		= alpha_pci_unmap_sg,
	.mapping_error		= alpha_pci_mapping_error,
	.dma_supported		= alpha_pci_supported,
	.set_dma_mask		= alpha_pci_set_mask,