#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/slab.h>

#include <asm/setup.h>

#define DBG udbg_printf

enum {
#if defined(CONFIG_PS3_DYNAMIC_DMA)
	USE_DYNAMIC_DMA = 1,
#else
	USE_DYNAMIC_DMA = 0,
#endif
};
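/* Pack two page-size exponents into bits 63:56 and 55:48 of the 64-bit
 * page-sizes argument passed to the hypervisor when the virtual address
 * space is constructed below. */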
static unsigned long make_page_sizes(unsigned long a, unsigned long b)
{
	return (a << 56) | (b << 48);
}
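/* Dump the global memory map for debugging.  Callers use the
 * debug_dump_map() wrapper so their own __func__ and __LINE__ show up in
 * the output. */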
#define debug_dump_map(x) _debug_dump_map(x, __func__, __LINE__)
static void _debug_dump_map(const struct mem_map *m, const char *func,
	int line)
{
	DBG("%s:%d: map.total     = %llxh\n", func, line, m->total);
	DBG("%s:%d: map.rm.size   = %llxh\n", func, line, m->rm.size);
	DBG("%s:%d: map.vas_id    = %llu\n", func, line, m->vas_id);
	DBG("%s:%d: map.htab_size = %llxh\n", func, line, m->htab_size);
	DBG("%s:%d: map.r1.base   = %llxh\n", func, line, m->r1.base);
	DBG("%s:%d: map.r1.offset = %lxh\n", func, line, m->r1.offset);
	DBG("%s:%d: map.r1.size   = %llxh\n", func, line, m->r1.size);
}
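/* Linux physical addresses below map.rm.size are identity-mapped to lpar
 * addresses; anything above lives in the high region and is shifted up by
 * map.r1.offset. */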
	return (phys_addr < map.rm.size || map.vas_id == 0)
		? phys_addr : phys_addr + map.r1.offset;
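/* Create the HV virtual address space: query the address region limits,
 * construct a VAS with a CONFIG_PS3_HTAB_SIZE hash table, then select it.
 * Any failure here is fatal. */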
	result = lv1_query_logical_partition_address_region_info(0,
		&start_address, &size, &access_right, &max_page_size,
		&flags);

	if (result) {
		DBG("%s:%d: lv1_query_logical_partition_address_region_info "
			"failed: %s\n", __func__, __LINE__,
			ps3_result(result));
		goto fail;
	}

	if (max_page_size < PAGE_SHIFT_16M) {
		DBG("%s:%d: bad max_page_size %llxh\n", __func__, __LINE__,
			max_page_size);
		goto fail;
	}

	result = lv1_construct_virtual_address_space(CONFIG_PS3_HTAB_SIZE,
			2, make_page_sizes(PAGE_SHIFT_16M, PAGE_SHIFT_64K),
			&map.vas_id, &map.htab_size);

	if (result) {
		DBG("%s:%d: lv1_construct_virtual_address_space failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		goto fail;
	}

	result = lv1_select_virtual_address_space(map.vas_id);

	if (result) {
		DBG("%s:%d: lv1_select_virtual_address_space failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		goto fail;
	}

	return;

fail:
	panic("ps3_mm_vas_create failed");
	DBG("%s:%d: map.vas_id    = %llu\n", __func__, __LINE__, map.vas_id);

	result = lv1_select_virtual_address_space(0);
	BUG_ON(result);
	result = lv1_destruct_virtual_address_space(map.vas_id);
	BUG_ON(result);
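/* Allocate the high memory region from the hypervisor.  The requested
 * size is trimmed down to a 16M page multiple, so r->size can come out
 * smaller than what was asked for; the difference is logged below. */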
static int ps3_mm_region_create(struct mem_region *r, unsigned long size)
{
	r->size = _ALIGN_DOWN(size, 1 << PAGE_SHIFT_16M);

	DBG("%s:%d requested  %lxh\n", __func__, __LINE__, size);
	DBG("%s:%d actual     %llxh\n", __func__, __LINE__, r->size);
	DBG("%s:%d difference %llxh (%lluMB)\n", __func__, __LINE__,
		size - r->size, (size - r->size) / 1024 / 1024);

	if (r->size == 0) {
		DBG("%s:%d: size == 0\n", __func__, __LINE__);
		result = -1;
		goto zero_region;
	}

	result = lv1_allocate_memory(r->size, PAGE_SHIFT_16M, 0,
		ALLOCATE_MEMORY_TRY_ALT_UNIT, &r->base, &muid);

	if (result || r->base < map.rm.size) {
		DBG("%s:%d: lv1_allocate_memory failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		goto zero_region;
	}
static void ps3_mm_region_destroy(struct mem_region *r)
{
	if (!r->destroy) {
		pr_info("%s:%d: Not destroying high region: %llxh %llxh\n",
			__func__, __LINE__, r->base, r->size);
		return;
	}

	DBG("%s:%d: r->base = %llxh\n", __func__, __LINE__, r->base);

	if (r->base) {
		result = lv1_release_memory(r->base);
		BUG_ON(result);
	}
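/* Look for a high memory region saved in the repository by an earlier
 * boot stage; returns non-zero when none is present. */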
static int ps3_mm_get_repository_highmem(struct mem_region *r)
{
	DBG("%s:%d: Found high region in repository: %llxh %llxh\n",
	    __func__, __LINE__, r->base, r->size);

	return 0;

zero_region:
	DBG("%s:%d: No high region in repository.\n", __func__, __LINE__);
static unsigned long dma_sb_lpar_to_bus(struct ps3_dma_region *r,
	unsigned long lpar_addr)
{
	if (lpar_addr >= map.rm.size)
		lpar_addr -= map.r1.offset;
#define dma_dump_region(_a) _dma_dump_region(_a, __func__, __LINE__)
static void __maybe_unused _dma_dump_region(const struct ps3_dma_region *r,
	const char *func, int line)
{
	DBG("%s:%d: dev        %llu:%llu\n", func, line, r->dev->bus_id,
		r->dev->dev_id);
	DBG("%s:%d: bus_addr   %lxh\n", func, line, r->bus_addr);
	DBG("%s:%d: len        %lxh\n", func, line, r->len);
	DBG("%s:%d: offset     %lxh\n", func, line, r->offset);
}
#define dma_dump_chunk(_a) _dma_dump_chunk(_a, __func__, __LINE__)
static void _dma_dump_chunk(const struct dma_chunk *c, const char *func,
	int line)
{
	DBG("%s:%d: r.dev        %llu:%llu\n", func, line,
	    c->region->dev->bus_id, c->region->dev->dev_id);
	DBG("%s:%d: r.bus_addr   %lxh\n", func, line, c->region->bus_addr);
	DBG("%s:%d: r.page_size  %u\n", func, line, c->region->page_size);
	DBG("%s:%d: r.len        %lxh\n", func, line, c->region->len);
	DBG("%s:%d: r.offset     %lxh\n", func, line, c->region->offset);
	DBG("%s:%d: c.lpar_addr  %lxh\n", func, line, c->lpar_addr);
	DBG("%s:%d: c.bus_addr   %lxh\n", func, line, c->bus_addr);
	DBG("%s:%d: c.len        %lxh\n", func, line, c->len);
}
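/* Chunk lookup: the request is aligned to the region page size, then the
 * chunk list is walked; a chunk fully containing the range is returned,
 * while ranges entirely below or above a chunk are skipped. */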
	unsigned long aligned_bus = _ALIGN_DOWN(bus_addr, 1 << r->page_size);
	unsigned long aligned_len = _ALIGN_UP(len + bus_addr - aligned_bus,
					      1 << r->page_size);

	list_for_each_entry(c, &r->chunk_list.head, link) {
		if (aligned_bus >= c->bus_addr &&
		    aligned_bus + aligned_len <= c->bus_addr + c->len)
			return c;
		if (aligned_bus + aligned_len <= c->bus_addr)
			continue;
static struct dma_chunk *dma_find_chunk_lpar(struct ps3_dma_region *r,
	unsigned long lpar_addr, unsigned long len)
{
	unsigned long aligned_lpar = _ALIGN_DOWN(lpar_addr, 1 << r->page_size);
	unsigned long aligned_len = _ALIGN_UP(len + lpar_addr - aligned_lpar,
					      1 << r->page_size);

		if (c->lpar_addr <= aligned_lpar &&
		    aligned_lpar < c->lpar_addr + c->len) {
			if (aligned_lpar + aligned_len <= c->lpar_addr + c->len)
				return c;
		}

		if (aligned_lpar + aligned_len <= c->lpar_addr) {
			continue;
		}
static int dma_sb_free_chunk(struct dma_chunk *c)
{
	result = lv1_unmap_device_dma_region(c->region->dev->bus_id,
		c->region->dev->dev_id, c->bus_addr, c->len);
static int dma_ioc0_free_chunk(struct dma_chunk *c)
{
	DBG("%s:start\n", __func__);
	for (iopage = 0; iopage < (c->len >> r->page_size); iopage++) {
		offset = (1 << r->page_size) * iopage;
		/* write an invalid (all-zero) iopte to unmap the page */
		result = lv1_put_iopte(0, c->bus_addr + offset,
				       c->lpar_addr + offset, r->ioid, 0);
		DBG("%s: bus=%#lx, lpar=%#lx, ioid=%d\n", __func__,
		    c->bus_addr + offset, c->lpar_addr + offset, r->ioid);

		if (result) {
			DBG("%s:%d: lv1_put_iopte failed: %s\n", __func__,
			    __LINE__, ps3_result(result));
		}
	}
	DBG("%s:end\n", __func__);
static int dma_sb_map_pages(struct ps3_dma_region *r, unsigned long phys_addr,
	unsigned long len, struct dma_chunk **c_out, u64 iopte_flag)
{
	BUG_ON(iopte_flag != 0xf800000000000000UL);
	result = lv1_map_device_dma_region(c->region->dev->bus_id,
					   c->region->dev->dev_id, c->lpar_addr,
					   c->bus_addr, c->len, iopte_flag);
	if (result) {
		DBG("%s:%d: lv1_map_device_dma_region failed: %s\n",
			__func__, __LINE__, ps3_result(result));
	}

	DBG(" <- %s:%d\n", __func__, __LINE__);
static int dma_ioc0_map_pages(struct ps3_dma_region *r, unsigned long phys_addr,
			      unsigned long len, struct dma_chunk **c_out,
			      u64 iopte_flag)
{
	DBG(KERN_ERR "%s: phy=%#lx, lpar=%#lx, len=%#lx\n", __func__,
	    phys_addr, ps3_mm_phys_to_lpar(phys_addr), len);

		DBG("%s: last bus=%#lx, len=%#lx\n", __func__,
		    last->bus_addr, last->len);

	DBG("%s: pgsize=%#x len=%#lx pages=%#x iopteflag=%#llx\n", __func__,
	    r->page_size, r->len, pages, iopte_flag);
	for (iopage = 0; iopage < pages; iopage++) {
		offset = (1 << r->page_size) * iopage;
		result = lv1_put_iopte(0, c->bus_addr + offset,
				       c->lpar_addr + offset, r->ioid,
				       iopte_flag);
		if (result) {
			pr_warning("%s:%d: lv1_put_iopte failed: %s\n",
				   __func__, __LINE__, ps3_result(result));
			goto fail_map;
		}
		DBG("%s: pg=%d bus=%#lx, lpar=%#lx, ioid=%#x\n", __func__,
		    iopage, c->bus_addr + offset, c->lpar_addr + offset,
		    r->ioid);
	}

	DBG("%s: end\n", __func__);
fail_map:
	for (iopage--; 0 <= iopage; iopage--) {
static int dma_sb_region_create(struct ps3_dma_region *r)
{
	DBG(" -> %s:%d:\n", __func__, __LINE__);
	if (!r->dev->bus_id) {
		pr_info("%s:%d: %llu:%llu no dma\n", __func__, __LINE__,
			r->dev->bus_id, r->dev->dev_id);
		return 0;
	}

	DBG("%s:%u: len = 0x%lx, page_size = %u, offset = 0x%lx\n", __func__,
	    __LINE__, r->len, r->page_size, r->offset);

	result = lv1_allocate_device_dma_region(r->dev->bus_id, r->dev->dev_id,
		roundup_pow_of_two(r->len), r->page_size, r->region_type,
		&bus_addr);
		DBG("%s:%d: lv1_allocate_device_dma_region failed: %s\n",
			__func__, __LINE__, ps3_result(result));
	result = lv1_allocate_io_segment(0, r->len, r->page_size, &bus_addr);

	if (result) {
		DBG("%s:%d: lv1_allocate_io_segment failed: %s\n",
			__func__, __LINE__, ps3_result(result));
	}
	DBG("%s: len=%#lx, pg=%d, bus=%#lx\n", __func__,
	    r->len, r->page_size, r->bus_addr);
	if (!r->dev->bus_id) {
		pr_info("%s:%d: %llu:%llu no dma\n", __func__, __LINE__,
			r->dev->bus_id, r->dev->dev_id);
		return 0;
	}

	list_for_each_entry_safe(c, tmp, &r->chunk_list.head, link) {
		list_del(&c->link);
		dma_sb_free_chunk(c);
	}
	result = lv1_free_device_dma_region(r->dev->bus_id, r->dev->dev_id,
		r->bus_addr);

	if (result)
		DBG("%s:%d: lv1_free_device_dma_region failed: %s\n",
			__func__, __LINE__, ps3_result(result));
	DBG("%s: start\n", __func__);

	list_for_each_entry_safe(c, n, &r->chunk_list.head, link) {
		list_del(&c->link);
		dma_ioc0_free_chunk(c);
	}

	result = lv1_release_io_segment(0, r->bus_addr);
	if (result)
		DBG("%s:%d: lv1_release_io_segment failed: %s\n",
			__func__, __LINE__, ps3_result(result));

	DBG("%s: end\n", __func__);
static int dma_sb_map_area(struct ps3_dma_region *r, unsigned long virt_addr,
	unsigned long len, dma_addr_t *bus_addr, u64 iopte_flag)
{
	unsigned long aligned_len = _ALIGN_UP(len + phys_addr - aligned_phys,
					      1 << r->page_size);
	DBG(" -> %s:%d\n", __func__, __LINE__);
	DBG("%s:%d virt_addr %lxh\n", __func__, __LINE__, virt_addr);
	DBG("%s:%d phys_addr %lxh\n", __func__, __LINE__, phys_addr);
	DBG("%s:%d lpar_addr %lxh\n", __func__, __LINE__, lpar_addr);
	DBG("%s:%d len       %lxh\n", __func__, __LINE__, len);
	DBG("%s:%d bus_addr  %llxh (%lxh)\n", __func__, __LINE__,
		*bus_addr, len);
	c = dma_find_chunk(r, *bus_addr, len);

	if (c) {
		DBG("%s:%d: reusing mapped chunk\n", __func__, __LINE__);
		c->usage_count++;
		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
		return 0;
	}

	result = dma_sb_map_pages(r, aligned_phys, aligned_len, &c, iopte_flag);

	if (result) {
		DBG("%s:%d: dma_sb_map_pages failed (%d)\n",
			__func__, __LINE__, result);
		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
		return result;
	}

	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
static int dma_ioc0_map_area(struct ps3_dma_region *r, unsigned long virt_addr,
	unsigned long len, dma_addr_t *bus_addr, u64 iopte_flag)
{
	unsigned long aligned_len = _ALIGN_UP(len + phys_addr - aligned_phys,
					      1 << r->page_size);

	DBG(KERN_ERR "%s: vaddr=%#lx, len=%#lx\n", __func__,
	    virt_addr, len);
	DBG(KERN_ERR "%s: ph=%#lx a_ph=%#lx a_l=%#lx\n", __func__,
	    phys_addr, aligned_phys, aligned_len);
		*bus_addr = c->bus_addr + phys_addr - aligned_phys;
		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
	result = dma_ioc0_map_pages(r, aligned_phys, aligned_len, &c,
				    iopte_flag);

	if (result) {
		DBG("%s:%d: dma_ioc0_map_pages failed (%d)\n",
			__func__, __LINE__, result);
		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
		return result;
	}

	*bus_addr = c->bus_addr + phys_addr - aligned_phys;
	DBG("%s: va=%#lx pa=%#lx a_pa=%#lx bus=%#llx\n", __func__,
	    virt_addr, phys_addr, aligned_phys, *bus_addr);

	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
	c = dma_find_chunk(r, bus_addr, len);

	if (!c) {
		unsigned long aligned_bus = _ALIGN_DOWN(bus_addr,
			1 << r->page_size);
		unsigned long aligned_len = _ALIGN_UP(len + bus_addr
			- aligned_bus, 1 << r->page_size);
		DBG("%s:%d: not found: bus_addr %llxh\n",
			__func__, __LINE__, bus_addr);
		DBG("%s:%d: not found: len %lxh\n",
			__func__, __LINE__, len);
		DBG("%s:%d: not found: aligned_bus %lxh\n",
			__func__, __LINE__, aligned_bus);
		DBG("%s:%d: not found: aligned_len %lxh\n",
			__func__, __LINE__, aligned_len);
		BUG();
	}
	dma_sb_free_chunk(c);

	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
	DBG("%s: start a=%#llx l=%#lx\n", __func__, bus_addr, len);

	c = dma_find_chunk(r, bus_addr, len);
	if (!c) {
		unsigned long aligned_bus = _ALIGN_DOWN(bus_addr,
			1 << r->page_size);
		unsigned long aligned_len = _ALIGN_UP(len + bus_addr
			- aligned_bus, 1 << r->page_size);
		DBG("%s:%d: not found: bus_addr %llxh\n",
			__func__, __LINE__, bus_addr);
		DBG("%s:%d: not found: len %lxh\n",
			__func__, __LINE__, len);
		DBG("%s:%d: not found: aligned_bus %lxh\n",
			__func__, __LINE__, aligned_bus);
		DBG("%s:%d: not found: aligned_len %lxh\n",
			__func__, __LINE__, aligned_len);
		BUG();
	}
	dma_ioc0_free_chunk(c);

	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
	DBG("%s: end\n", __func__);
	unsigned long virt_addr, len;

	if (r->len > 16*1024*1024) {
		/* force 16M dma pages for linear mapping */
		if (r->page_size != PS3_DMA_16M) {
			pr_info("%s:%d: forcing 16M pages for linear map\n",
				__func__, __LINE__);
			r->page_size = PS3_DMA_16M;
			r->len = _ALIGN_UP(r->len, 1 << r->page_size);
		}
	}
	result = dma_sb_region_create(r);

		result = dma_sb_map_area(r, virt_addr, len, &tmp,
			CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_SO_RW |
			CBE_IOPTE_M);
		virt_addr = map.rm.size;

		result = dma_sb_map_area(r, virt_addr, len, &tmp,
			CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_SO_RW |
			CBE_IOPTE_M);
		bus_addr = dma_sb_lpar_to_bus(r, lpar_addr);
		result = dma_sb_unmap_area(r, bus_addr, len);

		lpar_addr = map.r1.base;

		bus_addr = dma_sb_lpar_to_bus(r, lpar_addr);
		result = dma_sb_unmap_area(r, bus_addr, len);

	result = dma_sb_region_free(r);
static int dma_sb_map_area_linear(struct ps3_dma_region *r,
	unsigned long virt_addr, unsigned long len, dma_addr_t *bus_addr,
	u64 iopte_flag)
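/* Per-region-type operation tables.  The region init code below selects
 * one of these, keying on USE_DYNAMIC_DMA for southbridge devices. */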
static const struct ps3_dma_region_ops ps3_dma_sb_region_ops = {
	.create = dma_sb_region_create,
	.free = dma_sb_region_free,
	.map = dma_sb_map_area,
	.unmap = dma_sb_unmap_area
};

static const struct ps3_dma_region_ops ps3_dma_sb_region_linear_ops = {
	.create = dma_sb_region_create_linear,
	.free = dma_sb_region_free_linear,
	.map = dma_sb_map_area_linear,
	.unmap = dma_sb_unmap_area_linear
};

static const struct ps3_dma_region_ops ps3_dma_ioc0_region_ops = {
	.create = dma_ioc0_region_create,
	.free = dma_ioc0_region_free,
	.map = dma_ioc0_map_area,
	.unmap = dma_ioc0_unmap_area
};
	unsigned long lpar_addr;

		r->region_ops = (USE_DYNAMIC_DMA)
			? &ps3_dma_sb_region_ops
			: &ps3_dma_sb_region_linear_ops;
int ps3_dma_map(struct ps3_dma_region *r, unsigned long virt_addr,
	unsigned long len, dma_addr_t *bus_addr, u64 iopte_flag)
{
	return r->region_ops->map(r, virt_addr, len, bus_addr, iopte_flag);
}

int ps3_dma_unmap(struct ps3_dma_region *r, dma_addr_t bus_addr,
	unsigned long len)
{
	return r->region_ops->unmap(r, bus_addr, len);
}
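/*
 * Typical call sequence for a bus driver (an illustrative sketch only;
 * the device pointer, buffer, length and iopte flags below are assumed
 * examples, not taken from this file):
 *
 *	ps3_dma_region_init(dev, dev->d_region, PS3_DMA_64K, PS3_DMA_OTHER,
 *			    NULL, 0);
 *	dev->d_region->region_ops->create(dev->d_region);
 *	ps3_dma_map(dev->d_region, (unsigned long)buf, len, &bus_addr,
 *		    CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_M);
 *	(device performs DMA using bus_addr)
 *	ps3_dma_unmap(dev->d_region, bus_addr, len);
 */

/* Early memory setup: read the memory map from the repository, then pick
 * up (or create) the high memory region. */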
	DBG(" -> %s:%d\n", __func__, __LINE__);

	if (result)
		panic("ps3_repository_read_mm_info() failed");

	if (ps3_mm_get_repository_highmem(&map.r1)) {
		result = ps3_mm_region_create(&map.r1,
					      map.total - map.rm.size);

		if (!result)
			ps3_mm_set_repository_highmem(&map.r1);
	}
	if (!map.r1.size) {
		DBG("%s:%d: No highmem region found\n", __func__, __LINE__);
	} else {
		DBG("%s:%d: Adding highmem region: %llxh %llxh\n",
			__func__, __LINE__, map.rm.size,
			map.total - map.rm.size);
	}
	DBG(" <- %s:%d\n", __func__, __LINE__);
}

void ps3_mm_shutdown(void)
{
	ps3_mm_region_destroy(&map.r1);
}