#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/xen-ops.h>
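/*
 * Bounds of the Xen-SWIOTLB bounce buffer, used for quick range checks to
 * tell whether a DMA address was allocated by this layer.
 */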
static char *xen_io_tlb_start, *xen_io_tlb_end;
static unsigned long xen_io_tlb_nslabs;
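/*
 * Quick-lookup bus address of the start of the IO TLB buffer.
 */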
static u64 start_dma_addr;
static dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
{
	return phys_to_machine(XPADDR(paddr)).maddr;
}
static phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
{
	return machine_to_phys(XMADDR(baddr)).paddr;
}
static int check_pages_physically_contiguous(unsigned long pfn,
					     unsigned int offset,
					     size_t length)
	unsigned long next_mfn;
	for (i = 1; i < nr_pages; i++) {
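		/* Every following pseudo-physical frame must map to the next machine frame. */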
	if (check_pages_physically_contiguous(pfn, offset, size))
	unsigned long mfn = PFN_DOWN(dma_addr);
	unsigned long pfn = mfn_to_local_pfn(mfn);
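	/*
	 * An address outside our domain can alias a virtual address inside
	 * it, so only addresses within our domain are checked against the
	 * bounce buffer range.
	 */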
static int max_dma_bits = 32;
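/*
 * Exchange the IO TLB buffer, one IO_TLB_SEGSIZE chunk at a time, for
 * machine-contiguous memory addressable within dma_bits, widening dma_bits
 * up to max_dma_bits when Xen cannot satisfy the request.
 */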
static int
xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
		} while (rc && dma_bits++ < max_dma_bits);
	} while (i < nslabs);
static unsigned long xen_set_nslabs(unsigned long nr_tbl)
		xen_io_tlb_nslabs = nr_tbl;
		return "Cannot allocate Xen-SWIOTLB buffer\n";
		return "Failed to get contiguous memory for DMA from Xen!\n"
		       "You either: don't have the permissions, do not have"
		       " enough free memory under 4GB, or the hypervisor memory"
		       " is too fragmented!";
	unsigned int repeat = 3;
	bytes = xen_set_nslabs(xen_io_tlb_nslabs);
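	/*
	 * For a late (non-boot) allocation, fall back to smaller orders until
	 * at least IO_TLB_MIN_SLABS worth of slabs can be allocated.
	 */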
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
#define IO_TLB_MIN_SLABS ((1 << 20) >> IO_TLB_SHIFT)
			if (xen_io_tlb_start)
			pr_warn("Warning: only able to allocate %ld MB "
				"for software IO TLB\n",
				(PAGE_SIZE << order) >> 20);
	if (!xen_io_tlb_start) {
	xen_io_tlb_end = xen_io_tlb_start + bytes;
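	/*
	 * And replace that memory with machine-contiguous pages under 4GB.
	 */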
	rc = xen_swiotlb_fixup(xen_io_tlb_start, bytes, xen_io_tlb_nslabs);
			free_pages((unsigned long)xen_io_tlb_start, order);
			xen_io_tlb_start = NULL;
	start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);
		xen_io_tlb_nslabs = max(1024UL,
					(xen_io_tlb_nslabs >> 1));
	pr_err("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
		panic("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
		free_pages((unsigned long)xen_io_tlb_start, order);
	ret = (void *)vstart;
	dma_mask = dma_alloc_coherent_mask(hwdev, flags);
	dev_addr = xen_phys_to_bus(phys);
	if ((dev_addr + size - 1 <= dma_mask) &&
	    !range_straddles_page_boundary(phys, size))
		*dma_handle = dev_addr;
						 fls64(dma_mask)) != 0) {
	if ((dev_addr + size - 1 > dma_mask) ||
	    range_straddles_page_boundary(phys, size))
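/*
 * Map a single buffer of the indicated size for DMA in streaming mode. The
 * machine address to be used by the device is returned; buffers that are not
 * machine-contiguous or not DMA-capable are bounced through the swiotlb.
 */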
				unsigned long offset, size_t size,
	    !range_straddles_page_boundary(phys, size) && !swiotlb_force)
		return dev_addr;
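	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */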
	dev_addr = xen_virt_to_bus(map);
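	/*
	 * Ensure that the address returned is DMA'ble.
	 */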
	if (is_xen_swiotlb_buffer(dev_addr)) {
	xen_unmap_single(hwdev, dev_addr, size, dir);
	if (is_xen_swiotlb_buffer(dev_addr)) {
	xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
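/*
 * Map a scatter/gather list for DMA: entries that are not DMA-capable or
 * that straddle a machine page boundary are bounced through the swiotlb.
 */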
		    range_straddles_page_boundary(paddr, sg->length)) {
				sgl[0].dma_length = 0;
		sg->dma_length = sg->length;
		xen_unmap_single(hwdev, sg->dma_address, sg->dma_length, dir);
					sg->dma_length, dir, target);
	xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
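/*
 * Report DMA support only if the device mask covers the bus address of the
 * last byte of the bounce buffer.
 */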
	return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;