23 #include <linux/pci.h>
26 #include <linux/string.h>
27 #include <linux/types.h>
29 #include <linux/export.h>
31 #include <asm/cacheflush.h>
35 #include <asm/pgalloc.h>
36 #include <asm/uaccess.h>
37 #include <asm/tlbflush.h>
/*
 * PCXL DMA-mapping resource bitmap: one bit per mappable page.
 * Searched/updated by PCXL_FIND_FREE_MAPPING / PCXL_FREE_MAPPINGS below;
 * guarded by pcxl_res_lock at the call sites.
 */
45 static char *pcxl_res_map;
/* Byte index into pcxl_res_map where the next free-slot search starts
 * (updated on each successful allocation to reduce rescanning). */
46 static int pcxl_res_hint;
/* Size of pcxl_res_map in bytes (reported via /proc as "Resource bitmap"). */
47 static int pcxl_res_size;
49 #ifdef DEBUG_PCXL_RESOURCE
/* Debug tracing for the PCXL resource allocator (pcxl_alloc_range /
 * pcxl_free_range). Compiles to printk only when DEBUG_PCXL_RESOURCE
 * is defined; otherwise defined away in the (off-view) #else branch. */
50 #define DBG_RES(x...) printk(x)
62 void dump_resmap(
void)
64 u_long *res_ptr = (
unsigned long *)pcxl_res_map;
68 for(; i < (pcxl_res_size /
sizeof(
unsigned long)); ++
i, ++res_ptr)
69 printk(
"%08lx ", *res_ptr);
74 static inline void dump_resmap(
void) {;}
82 static inline int map_pte_uncached(
pte_t *
pte,
84 unsigned long size,
unsigned long *paddr_ptr)
87 unsigned long orig_vaddr =
vaddr;
106 }
while (vaddr < end);
110 static inline int map_pmd_uncached(
pmd_t *
pmd,
unsigned long vaddr,
111 unsigned long size,
unsigned long *paddr_ptr)
114 unsigned long orig_vaddr =
vaddr;
121 pte_t * pte = pte_alloc_kernel(pmd, vaddr);
124 if (map_pte_uncached(pte, orig_vaddr, end - vaddr, paddr_ptr))
129 }
while (vaddr < end);
133 static inline int map_uncached_pages(
unsigned long vaddr,
unsigned long size,
137 unsigned long end = vaddr +
size;
146 if (map_pmd_uncached(pmd, vaddr, end - vaddr, &paddr))
150 }
while (vaddr && (vaddr < end));
154 static inline void unmap_uncached_pte(
pmd_t * pmd,
unsigned long vaddr,
159 unsigned long orig_vaddr =
vaddr;
187 }
while (vaddr < end);
190 static inline void unmap_uncached_pmd(
pgd_t * dir,
unsigned long vaddr,
195 unsigned long orig_vaddr =
vaddr;
210 unmap_uncached_pte(pmd, orig_vaddr, end - vaddr);
214 }
while (vaddr < end);
217 static void unmap_uncached_pages(
unsigned long vaddr,
unsigned long size)
220 unsigned long end = vaddr +
size;
224 unmap_uncached_pmd(dir, vaddr, end - vaddr);
227 }
while (vaddr && (vaddr < end));
230 #define PCXL_SEARCH_LOOP(idx, mask, size) \
231 for(; res_ptr < res_end; ++res_ptr) \
233 if(0 == ((*res_ptr) & mask)) { \
235 idx = (int)((u_long)res_ptr - (u_long)pcxl_res_map); \
236 pcxl_res_hint = idx + (size >> 3); \
237 goto resource_found; \
241 #define PCXL_FIND_FREE_MAPPING(idx, mask, size) { \
242 u##size *res_ptr = (u##size *)&(pcxl_res_map[pcxl_res_hint & ~((size >> 3) - 1)]); \
243 u##size *res_end = (u##size *)&pcxl_res_map[pcxl_res_size]; \
244 PCXL_SEARCH_LOOP(idx, mask, size); \
245 res_ptr = (u##size *)&pcxl_res_map[0]; \
246 PCXL_SEARCH_LOOP(idx, mask, size); \
254 unsigned int pages_needed = size >>
PAGE_SHIFT;
259 DBG_RES(
"pcxl_alloc_range() size: %d pages_needed %d pages_mask 0x%08lx\n",
260 size, pages_needed, mask);
264 if(pages_needed <= 8) {
266 }
else if(pages_needed <= 16) {
268 }
else if(pages_needed <= 32) {
271 panic(
"%s: pcxl_alloc_range() Too many pages to map.\n",
276 panic(
"%s: pcxl_alloc_range() out of dma mapping resources\n",
281 DBG_RES(
"pcxl_alloc_range() res_idx %d mask 0x%08lx res_hint: %d\n",
282 res_idx, mask, pcxl_res_hint);
284 pcxl_used_pages += pages_needed;
285 pcxl_used_bytes += ((pages_needed >> 3) ? (pages_needed >> 3) : 1);
287 spin_unlock_irqrestore(&pcxl_res_lock, flags);
297 #define PCXL_FREE_MAPPINGS(idx, m, size) \
298 u##size *res_ptr = (u##size *)&(pcxl_res_map[(idx) + (((size >> 3) - 1) & (~((size >> 3) - 1)))]); \
306 pcxl_free_range(
unsigned long vaddr,
size_t size)
310 unsigned int pages_mapped = size >>
PAGE_SHIFT;
315 DBG_RES(
"pcxl_free_range() res_idx: %d size: %d pages_mapped %d mask 0x%08lx\n",
316 res_idx, size, pages_mapped, mask);
320 if(pages_mapped <= 8) {
322 }
else if(pages_mapped <= 16) {
324 }
else if(pages_mapped <= 32) {
327 panic(
"%s: pcxl_free_range() Too many pages to unmap.\n",
331 pcxl_used_pages -= (pages_mapped ? pages_mapped : 1);
332 pcxl_used_bytes -= ((pages_mapped >> 3) ? (pages_mapped >> 3) : 1);
334 spin_unlock_irqrestore(&pcxl_res_lock, flags);
339 static int proc_pcxl_dma_show(
struct seq_file *
m,
void *
v)
343 unsigned long *res_ptr = (
u_long *)pcxl_res_map;
345 unsigned long total_pages = pcxl_res_size << 3;
347 seq_printf(m,
"\nDMA Mapping Area size : %d bytes (%ld pages)\n",
350 seq_printf(m,
"Resource bitmap : %d bytes\n", pcxl_res_size);
352 seq_puts(m,
" total: free: used: % used:\n");
353 seq_printf(m,
"blocks %8d %8ld %8ld %8ld%%\n", pcxl_res_size,
354 pcxl_res_size - pcxl_used_bytes, pcxl_used_bytes,
355 (pcxl_used_bytes * 100) / pcxl_res_size);
357 seq_printf(m,
"pages %8ld %8ld %8ld %8ld%%\n", total_pages,
358 total_pages - pcxl_used_pages, pcxl_used_pages,
359 (pcxl_used_pages * 100 / total_pages));
364 for(; i < (pcxl_res_size /
sizeof(
u_long)); ++
i, ++res_ptr) {
381 .open = proc_pcxl_dma_open,
390 if (pcxl_dma_start == 0)
398 memset(pcxl_res_map, 0, pcxl_res_size);
402 "pcxl_dma_init: Unable to create gsc /proc dir entry\n");
405 ent = proc_create(
"pcxl_dma", 0, proc_gsc_root,
409 "pci-dma.c: Unable to create pcxl_dma /proc entry.\n");
428 map_uncached_pages(vaddr, size, paddr);
439 return (
void *)
vaddr;
442 static void pa11_dma_free_consistent (
struct device *dev,
size_t size,
void *vaddr,
dma_addr_t dma_handle)
448 unmap_uncached_pages((
unsigned long)vaddr, size);
449 pcxl_free_range((
unsigned long)vaddr, size);
484 for (i = 0; i < nents; i++, sglist++ ) {
504 for (i = 0; i < nents; i++, sglist++ )
529 for (i = 0; i < nents; i++, sglist++ )
539 for (i = 0; i < nents; i++, sglist++ )
544 .dma_supported = pa11_dma_supported,
545 .alloc_consistent = pa11_dma_alloc_consistent,
546 .alloc_noncoherent = pa11_dma_alloc_consistent,
547 .free_consistent = pa11_dma_free_consistent,
548 .map_single = pa11_dma_map_single,
549 .unmap_single = pa11_dma_unmap_single,
550 .map_sg = pa11_dma_map_sg,
551 .unmap_sg = pa11_dma_unmap_sg,
552 .dma_sync_single_for_cpu = pa11_dma_sync_single_for_cpu,
553 .dma_sync_single_for_device = pa11_dma_sync_single_for_device,
554 .dma_sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu,
555 .dma_sync_sg_for_device = pa11_dma_sync_sg_for_device,
558 static void *fail_alloc_consistent(
struct device *dev,
size_t size,
564 static void *pa11_dma_alloc_noncoherent(
struct device *dev,
size_t size,
576 static void pa11_dma_free_noncoherent(
struct device *dev,
size_t size,
584 .dma_supported = pa11_dma_supported,
585 .alloc_consistent = fail_alloc_consistent,
586 .alloc_noncoherent = pa11_dma_alloc_noncoherent,
587 .free_consistent = pa11_dma_free_noncoherent,
588 .map_single = pa11_dma_map_single,
589 .unmap_single = pa11_dma_unmap_single,
590 .map_sg = pa11_dma_map_sg,
591 .unmap_sg = pa11_dma_unmap_sg,
592 .dma_sync_single_for_cpu = pa11_dma_sync_single_for_cpu,
593 .dma_sync_single_for_device = pa11_dma_sync_single_for_device,
594 .dma_sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu,
595 .dma_sync_sg_for_device = pa11_dma_sync_sg_for_device,