#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/scatterlist.h>
#include <linux/iommu.h>

#include <asm/cacheflush.h>
#include <asm/mach/map.h>

#include <plat/iommu.h>
#include <plat/iovmm.h>
#include <plat/iopgtable.h>

static struct kmem_cache *iovm_area_cachep;
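/*
 * Overview comment (added for orientation): this mapper juggles three
 * address spaces: 'da' (device virtual addresses seen through the OMAP
 * IOMMU), 'pa' (physical addresses) and 'va' (MPU/kernel virtual
 * addresses). Each live mapping is tracked by a 'struct iovm_struct'
 * allocated from iovm_area_cachep and kept on the per-IOMMU 'mmap'
 * list in ascending 'da' order.
 */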
/* return the offset of the first scatterlist entry in an sg table */
static unsigned int sgtable_offset(const struct sg_table *sgt)
{
	if (!sgt || !sgt->nents)
		return 0;

	return sgt->sgl->offset;
}
/* return total bytes of sg buffers */
static size_t sgtable_len(const struct sg_table *sgt)
{
	unsigned int i, total = 0;
	struct scatterlist *sg;

	if (!sgt)
		return 0;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;

		bytes = sg->length + sg->offset;

		if (!iopgsz_ok(bytes)) {
			pr_err("%s: sg[%d] not iommu pagesize(%u %u)\n",
			       __func__, i, bytes, sg->offset);
			return 0;
		}

		if (i && sg->offset) {
			pr_err("%s: sg[%d] offset not allowed in internal entries\n",
			       __func__, i);
			return 0;
		}

		total += bytes;
	}

	return total;
}
#define sgtable_ok(x)	(!!sgtable_len(x))
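/*
 * Note (added): an sg table is usable by this mapper iff sgtable_len()
 * accepts every entry, so the macro above treats a zero total length
 * as "invalid table".
 */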
static unsigned max_alignment(u32 addr)
{
	int i;
	unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };

	for (i = 0; i < ARRAY_SIZE(pagesize) && addr & (pagesize[i] - 1); i++)
		;

	return (i < ARRAY_SIZE(pagesize)) ? pagesize[i] : 0;
}
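/*
 * Worked example (added for illustration): max_alignment(0x820000)
 * skips SZ_16M and SZ_1M (0x820000 is aligned to neither) and returns
 * SZ_64K, the largest OMAP IOMMU page size that divides the address.
 */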
/*
 * calculate the optimal number of sg elements from total bytes,
 * based on the iommu superpage sizes
 */
static unsigned sgtable_nents(size_t bytes, u32 da, u32 pa)
{
	unsigned nr_entries = 0, ent_sz;

	if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
		pr_err("%s: wrong size %08x\n", __func__, bytes);
		return 0;
	}

	while (bytes) {
		ent_sz = max_alignment(da | pa);
		ent_sz = min_t(unsigned, ent_sz, iopgsz_max(bytes));
		nr_entries++;

		da += ent_sz;
		pa += ent_sz;
		bytes -= ent_sz;
	}

	return nr_entries;
}
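/*
 * Example (added for illustration): for bytes = SZ_1M with both 'da'
 * and 'pa' aligned to a 1MB boundary, max_alignment(da | pa) yields
 * SZ_1M and a single 1MB entry suffices; shift either address by 64KB
 * and the region decomposes into several smaller entries instead.
 */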
/* allocate and initialize sg_table header (a kind of 'superblock') */
static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags,
				      u32 da, u32 pa)
{
	unsigned int nr_entries;
	int err;
	struct sg_table *sgt;

	if (!bytes || !IS_ALIGNED(bytes, PAGE_SIZE))
		return ERR_PTR(-EINVAL);

	if (flags & IOVMF_LINEAR) {
		nr_entries = sgtable_nents(bytes, da, pa);
		if (!nr_entries)
			return ERR_PTR(-EINVAL);
	} else {
		nr_entries = bytes / PAGE_SIZE;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL);
	if (err) {
		kfree(sgt);
		return ERR_PTR(err);
	}

	pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries);

	return sgt;
}
/* free sg_table header (a kind of superblock) */
static void sgtable_free(struct sg_table *sgt)
{
	if (!sgt)
		return;

	sg_free_table(sgt);
	kfree(sgt);

	pr_debug("%s: sgt:%p\n", __func__, sgt);
}
/* map 'sglist' to a contiguous mpu virtual area and return 'va' */
static void *vmap_sg(const struct sg_table *sgt)
{
	u32 va;
	size_t total;
	unsigned int i;
	struct scatterlist *sg;
	struct vm_struct *new;
	const struct mem_type *mtype;

	mtype = get_mem_type(MT_DEVICE);
	if (!mtype)
		return ERR_PTR(-EINVAL);

	total = sgtable_len(sgt);
	if (!total)
		return ERR_PTR(-EINVAL);

	new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END);
	if (!new)
		return ERR_PTR(-ENOMEM);
	va = (u32)new->addr;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = sg->length + sg->offset;
		u32 pa = sg_phys(sg) - sg->offset;
		int err;

		BUG_ON(bytes != PAGE_SIZE);

		err = ioremap_page(va, pa, mtype);
		if (err)
			goto err_out;

		va += bytes;
	}

	flush_cache_vmap((unsigned long)new->addr,
			 (unsigned long)(new->addr + total));
	return new->addr;

err_out:
	WARN_ON(1);	/* FIXME: cleanup some mpu mappings */
	vunmap(new->addr);
	return ERR_PTR(-EAGAIN);
}
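/*
 * Note (added): vmap_sg() gives the MPU a linear kernel-side view of a
 * scattered buffer; flush_cache_vmap() above makes the new mapping
 * coherent before anyone dereferences it.
 */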
static inline void vunmap_sg(const void *va)
{
	vunmap(va);
}
static struct iovm_struct *__find_iovm_area(struct omap_iommu *obj,
					    const u32 da)
{
	struct iovm_struct *tmp;

	list_for_each_entry(tmp, &obj->mmap, list) {
		if ((da >= tmp->da_start) && (da < tmp->da_end)) {
			size_t len;

			len = tmp->da_end - tmp->da_start;

			dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n",
				__func__, tmp->da_start, da, tmp->da_end,
				len, tmp->flags);

			return tmp;
		}
	}

	return NULL;
}
/**
 * omap_find_iovm_area  -  find iovma which includes @da
 * @dev:	client device
 * @da:		iommu device virtual address
 *
 * Find the existing iovma starting at @da
 */
struct iovm_struct *omap_find_iovm_area(struct device *dev, u32 da)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	struct iovm_struct *area;

	mutex_lock(&obj->mmap_lock);
	area = __find_iovm_area(obj, da);
	mutex_unlock(&obj->mmap_lock);

	return area;
}
EXPORT_SYMBOL_GPL(omap_find_iovm_area);
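/*
 * Note (added): obj->mmap_lock covers only the list walk; the returned
 * iovma is not refcounted, so callers rely on their own serialization
 * against concurrent unmap/free.
 */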
/*
 * Find a hole in the 'da' address space which fits the requested
 * address and length, and return a newly allocated iovma for it.
 */
static struct iovm_struct *alloc_iovm_area(struct omap_iommu *obj, u32 da,
					   size_t bytes, u32 flags)
{
	struct iovm_struct *new, *tmp;
	u32 start, prev_end, alignment;

	if (!obj || !bytes)
		return ERR_PTR(-EINVAL);

	start = da;
	alignment = PAGE_SIZE;

	if (~flags & IOVMF_DA_FIXED) {
		/* Don't map address 0 */
		start = obj->da_start ? obj->da_start : alignment;

		if (flags & IOVMF_LINEAR)
			alignment = iopgsz_max(bytes);
		start = roundup(start, alignment);
	} else if (start < obj->da_start || start > obj->da_end ||
		   obj->da_end - start < bytes) {
		return ERR_PTR(-EINVAL);
	}

	tmp = NULL;
	if (list_empty(&obj->mmap))
		goto found;

	prev_end = 0;
	list_for_each_entry(tmp, &obj->mmap, list) {

		if (prev_end > start)
			break;

		if (tmp->da_start > start && (tmp->da_start - start) >= bytes)
			goto found;

		if (tmp->da_end >= start && ~flags & IOVMF_DA_FIXED)
			start = roundup(tmp->da_end + 1, alignment);

		prev_end = tmp->da_end;
	}

	if ((start >= prev_end) && (obj->da_end - start >= bytes))
		goto found;

	dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n",
		__func__, da, bytes, flags);

	return ERR_PTR(-EINVAL);

found:
	new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);

	new->iommu = obj;
	new->da_start = start;
	new->da_end = start + bytes;
	new->flags = flags;

	/* keep ascending order of iovmas */
	if (tmp)
		list_add_tail(&new->list, &tmp->list);
	else
		list_add(&new->list, &obj->mmap);

	dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%x) %08x\n",
		__func__, new->da_start, start, new->da_end, bytes, flags);

	return new;
}
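/*
 * Note (added): this is a first-fit scan over a list kept sorted by
 * da_start. IOVMF_DA_FIXED requests must fit exactly where asked,
 * while relocatable requests are bumped past each occupied area and
 * re-aligned until a hole fits.
 */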
/**
 * omap_da_to_va - convert (d) to (v)
 * @dev:	client device
 * @da:		iommu device virtual address
 *
 * Returns mpu virtual addr which corresponds to a given device virtual addr
 */
void *omap_da_to_va(struct device *dev, u32 da)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	void *va = NULL;
	struct iovm_struct *area;

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}
	va = area->va;
out:
	mutex_unlock(&obj->mmap_lock);

	return va;
}
EXPORT_SYMBOL_GPL(omap_da_to_va);
static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
{
	unsigned int i;
	struct scatterlist *sg;
	void *va = _va;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		struct page *pg;
		const size_t bytes = PAGE_SIZE;

		/* iommu 'superpage' isn't supported with 'omap_iommu_vmalloc()' */
		pg = vmalloc_to_page(va);
		BUG_ON(!pg);
		sg_set_page(sg, pg, bytes, 0);

		va += bytes;
	}
}
static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
{
	/*
	 * Actually this is not necessary at all, just exists for
	 * consistency of the code readability.
	 */
	BUG_ON(!sgt);
}
/* create 'da' <-> 'pa' mapping from 'sgt' */
static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
			 const struct sg_table *sgt, u32 flags)
{
	int err = -EINVAL;
	unsigned int i, j;
	struct scatterlist *sg;
	u32 da = new->da_start;

	if (!domain || !sgt)
		return -EINVAL;

	BUG_ON(!sgtable_ok(sgt));

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		u32 pa;
		size_t bytes;

		pa = sg_phys(sg) - sg->offset;
		bytes = sg->length + sg->offset;

		flags &= ~IOVMF_PGSZ_MASK;

		if (bytes_to_iopgsz(bytes) < 0)
			goto err_out;

		pr_debug("%s: [%d] %08x %08x(%x)\n", __func__,
			 i, da, pa, bytes);

		err = iommu_map(domain, da, pa, bytes, flags);
		if (err)
			goto err_out;

		da += bytes;
	}
	return 0;

err_out:
	da = new->da_start;

	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes;

		bytes = sg->length + sg->offset;

		/* ignore failures.. we're already handling one */
		iommu_unmap(domain, da, bytes);

		da += bytes;
	}
	return err;
}
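/*
 * Note (added): on any mapping failure the loop above is unwound by
 * iommu_unmap()ing the entries already mapped, so a failed
 * map_iovm_area() leaves the domain unchanged.
 */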
/* release 'da' <-> 'pa' mapping */
static void unmap_iovm_area(struct iommu_domain *domain,
			    struct omap_iommu *obj, struct iovm_struct *area)
{
	u32 start;
	size_t total = area->da_end - area->da_start;
	const struct sg_table *sgt = area->sgt;
	struct scatterlist *sg;
	int i;
	size_t unmapped;

	BUG_ON(!sgtable_ok(sgt));
	BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));

	start = area->da_start;
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;

		bytes = sg->length + sg->offset;

		unmapped = iommu_unmap(domain, start, bytes);
		if (unmapped < bytes)
			break;

		dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
			__func__, start, bytes, area->flags);

		BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));

		total -= bytes;
		start += bytes;
	}
	BUG_ON(total);
}
/* template function for all unmapping */
static struct sg_table *unmap_vm_area(struct iommu_domain *domain,
				      struct omap_iommu *obj, const u32 da,
				      void (*fn)(const void *), u32 flags)
{
	struct sg_table *sgt = NULL;
	struct iovm_struct *area;

	if (!IS_ALIGNED(da, PAGE_SIZE)) {
		dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da);
		return NULL;
	}

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}

	if ((area->flags & flags) != flags) {
		dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__,
			area->flags);
		goto out;
	}
	sgt = (struct sg_table *)area->sgt;

	unmap_iovm_area(domain, obj, area);

	fn(area->va);

	dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__,
		area->da_start, da, area->da_end,
		area->da_end - area->da_start, area->flags);

	free_iovm_area(obj, area);
out:
	mutex_unlock(&obj->mmap_lock);

	return sgt;
}
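/*
 * Note (added): 'fn' is the va-side destructor (vunmap_sg for vmap'ed
 * regions, vfree for vmalloc'ed ones), so this one teardown path serves
 * both omap_iommu_vunmap() and omap_iommu_vfree() below.
 */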
static u32 map_iommu_region(struct iommu_domain *domain,
			    struct omap_iommu *obj, u32 da,
			    const struct sg_table *sgt, void *va,
			    size_t bytes, u32 flags)
{
	int err = -ENOMEM;
	struct iovm_struct *new;

	mutex_lock(&obj->mmap_lock);

	new = alloc_iovm_area(obj, da, bytes, flags);
	if (IS_ERR(new)) {
		err = PTR_ERR(new);
		goto err_alloc_iovma;
	}
	new->va = va;
	new->sgt = sgt;

	if (map_iovm_area(domain, new, sgt, new->flags))
		goto err_map;

	mutex_unlock(&obj->mmap_lock);

	dev_dbg(obj->dev, "%s: da:%08x(%x) flags:%08x va:%p\n",
		__func__, new->da_start, bytes, new->flags, va);

	return new->da_start;

err_map:
	free_iovm_area(obj, new);
err_alloc_iovma:
	mutex_unlock(&obj->mmap_lock);
	return err;
}
static inline u32
__iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj,
	     u32 da, const struct sg_table *sgt, void *va,
	     size_t bytes, u32 flags)
{
	return map_iommu_region(domain, obj, da, sgt, va, bytes, flags);
}
/**
 * omap_iommu_vmap  -  (d)-(p)-(v) address mapper
 * @domain:	iommu domain
 * @dev:	client device
 * @da:		contiguous iommu virtual memory
 * @sgt:	address of scatter gather table
 * @flags:	iovma and page property
 *
 * Creates 1-n-1 mapping with given @sgt and returns @da.
 * All @sgt elements must be io page size aligned.
 */
u32 omap_iommu_vmap(struct iommu_domain *domain, struct device *dev, u32 da,
		    const struct sg_table *sgt, u32 flags)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	size_t bytes;
	void *va = NULL;

	if (!obj || !obj->dev || !sgt)
		return -EINVAL;

	bytes = sgtable_len(sgt);
	if (!bytes)
		return -EINVAL;
	bytes = PAGE_ALIGN(bytes);

	if (flags & IOVMF_MMIO) {
		va = vmap_sg(sgt);
		if (IS_ERR(va))
			return PTR_ERR(va);
	}

	flags |= IOVMF_DISCONT;
	flags |= IOVMF_MMIO;

	da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da)) {
		vunmap_sg(va);
		return da;
	}

	return da + sgtable_offset(sgt);
}
EXPORT_SYMBOL_GPL(omap_iommu_vmap);
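/*
 * Usage sketch (added for illustration; 'domain', 'dev', 'sgt' and
 * 'flags' are assumed to come from the caller):
 *
 *	da = omap_iommu_vmap(domain, dev, 0, sgt, flags);
 *	if (IS_ERR_VALUE(da))
 *		return da;
 *	...			// device works on the buffer at 'da'
 *	sgt = omap_iommu_vunmap(domain, dev, da);
 */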
/**
 * omap_iommu_vunmap  -  release virtual mapping obtained by 'omap_iommu_vmap()'
 * @domain:	iommu domain
 * @dev:	client device
 * @da:		iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, which was obtained from 'omap_iommu_vmap()'.
 */
struct sg_table *omap_iommu_vunmap(struct iommu_domain *domain,
				   struct device *dev, u32 da)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	struct sg_table *sgt;

	/*
	 * 'sgt' is allocated before 'omap_iommu_vmap' is called.
	 * Just returns 'sgt' to the caller to free
	 */
	da &= PAGE_MASK;
	sgt = unmap_vm_area(domain, obj, da, vunmap_sg,
			    IOVMF_DISCONT | IOVMF_MMIO);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	return sgt;
}
EXPORT_SYMBOL_GPL(omap_iommu_vunmap);
/**
 * omap_iommu_vmalloc  -  (d)-(p)-(v) address allocator and mapper
 * @domain:	iommu domain
 * @dev:	client device
 * @da:		contiguous iommu virtual memory
 * @bytes:	allocation size
 * @flags:	iovma and page property
 *
 * Allocate @bytes linearly and creates 1-n-1 mapping and returns
 * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
 */
u32 omap_iommu_vmalloc(struct iommu_domain *domain, struct device *dev,
		       u32 da, size_t bytes, u32 flags)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	void *va;
	struct sg_table *sgt;

	if (!obj || !obj->dev || !bytes)
		return -EINVAL;

	bytes = PAGE_ALIGN(bytes);

	va = vmalloc(bytes);
	if (!va)
		return -ENOMEM;

	flags |= IOVMF_DISCONT;
	flags |= IOVMF_ALLOC;

	sgt = sgtable_alloc(bytes, flags, da, 0);
	if (IS_ERR(sgt)) {
		da = PTR_ERR(sgt);
		goto err_sgt_alloc;
	}
	sgtable_fill_vmalloc(sgt, va);

	da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		goto err_iommu_vmap;

	return da;

err_iommu_vmap:
	sgtable_drain_vmalloc(sgt);
	sgtable_free(sgt);
err_sgt_alloc:
	vfree(va);
	return da;
}
EXPORT_SYMBOL_GPL(omap_iommu_vmalloc);
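/*
 * Usage sketch (added for illustration; error handling abbreviated):
 *
 *	da = omap_iommu_vmalloc(domain, dev, 0, SZ_1M, 0);
 *	if (IS_ERR_VALUE(da))
 *		return da;
 *	...			// buffer is contiguous in 'da' space,
 *				// backed by vmalloc pages on the MPU side
 *	omap_iommu_vfree(domain, dev, da);
 */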
/**
 * omap_iommu_vfree  -  release memory allocated by 'omap_iommu_vmalloc()'
 * @dev:	client device
 * @da:		iommu device virtual address
 *
 * Frees the iommu virtually continuous memory area starting at
 * @da, as obtained from 'omap_iommu_vmalloc()'.
 */
void omap_iommu_vfree(struct iommu_domain *domain, struct device *dev,
		      const u32 da)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	struct sg_table *sgt;

	sgt = unmap_vm_area(domain, obj, da, vfree,
			    IOVMF_DISCONT | IOVMF_ALLOC);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(omap_iommu_vfree);
static int __init iovmm_init(void)
{
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	struct kmem_cache *p;

	p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct),
			      0, flags, NULL);
	if (!p)
		return -ENOMEM;
	iovm_area_cachep = p;

	return 0;
}
module_init(iovmm_init);
static void __exit iovmm_exit(void)
{
	kmem_cache_destroy(iovm_area_cachep);
}
module_exit(iovmm_exit);

MODULE_LICENSE("GPL v2");