#include <linux/types.h>
#define SHMSYNCOFFSET 4		/* GPP byte offset */

#define BUFFERSIZE 1024

#define TIHELEN_ACKTIMEOUT 10000

#define MMU_SECTION_ADDR_MASK    0xFFF00000
#define MMU_SSECTION_ADDR_MASK   0xFF000000
#define MMU_LARGE_PAGE_MASK      0xFFFF0000
#define MMU_SMALL_PAGE_MASK      0xFFFFF000
#define OMAP3_IVA2_BOOTADDR_MASK 0xFFFFFC00
#define PAGES_II_LVL_TABLE   512
#define PHYS_TO_PAGE(phys) pfn_to_page((phys) >> PAGE_SHIFT)

#define OMAP3_IVA2_BOOTMOD_IDLE 1
#define OMAP2_CONTROL_GENERAL 0x270
#define OMAP343X_CONTROL_IVA2_BOOTADDR (OMAP2_CONTROL_GENERAL + 0x0190)
#define OMAP343X_CONTROL_IVA2_BOOTMOD (OMAP2_CONTROL_GENERAL + 0x0194)
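/*
 * Forward declarations of the bridge driver entry points and MMU page-table
 * helpers defined later in this file.
 */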
static int bridge_brd_read(struct bridge_dev_context *dev_ctxt,
			   u8 *host_buff, u32 dsp_addr,
			   u32 ul_num_bytes, u32 mem_type);
static int bridge_brd_write(struct bridge_dev_context *dev_ctxt,
			    u8 *host_buff, u32 dsp_addr,
			    u32 ul_num_bytes, u32 mem_type);
static int bridge_brd_mem_copy(struct bridge_dev_context *dev_ctxt,
			       u32 dsp_dest_addr, u32 dsp_src_addr,
			       u32 ul_num_bytes, u32 mem_type);
static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt,
				u8 *host_buff, u32 dsp_addr,
				u32 ul_num_bytes, u32 mem_type);
static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt,
			      u32 ul_mpu_addr, u32 virt_addr,
			      u32 ul_num_bytes, u32 ul_map_attr,
			      struct page **mapped_pages);
static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt,
				 u32 virt_addr, u32 ul_num_bytes);
static int bridge_dev_ctrl(struct bridge_dev_context *dev_context,
			   u32 dw_cmd, void *pargs);
static int mem_map_vmalloc(struct bridge_dev_context *dev_context,
			   u32 ul_mpu_addr, u32 virt_addr,
			   u32 ul_num_bytes,
			   struct hw_mmu_map_attrs_t *hw_attrs);
	bridge_brd_set_state,
	bridge_brd_mem_write,
	bridge_brd_mem_un_map,
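/*
 *  ======== bad_page_dump ========
 *      Print diagnostics for a mapped page whose use count has dropped to 0.
 */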
static void bad_page_dump(u32 pa, struct page *pg)
{
	pr_emerg("DSPBRIDGE: MAP function: COUNT 0 FOR PA 0x%x\n", pa);
	pr_emerg("Bad page state in process '%s'\n"
		 "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n",
		 current->comm, pg, (int)(2 * sizeof(unsigned long)),
		 (unsigned long)pg->flags, pg->mapping,
		 page_mapcount(pg), page_count(pg));
}
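/*
 *  ======== bridge_drv_entry ========
 *  purpose:
 *      Bridge Driver entry point; hands back the driver interface table.
 */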
		      const char *driver_file_name)
{
	if (strcmp(driver_file_name, "UMA") == 0)
		*drv_intf = &drv_interface_fxns;
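/*
 *  ======== bridge_brd_monitor ========
 *  purpose:
 *      Puts the DSP into a loadable (monitor) state.
 */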
	if (!(temp & 0x02)) {
				  u8 *host_buff, u32 dsp_addr,
					   ul_num_bytes, mem_type);
	/* Copy the data from DSP memory */
	memcpy(host_buff, (void *)(dsp_base_addr + offset), ul_num_bytes);
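/*
 *  ======== bridge_brd_start ========
 *  purpose:
 *      Initializes DSP MMU and starts the DSP.
 */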
	u32 ul_shm_base_virt;	/* Dsp Virt SM base addr */
	u32 ul_tlb_base_virt;	/* Base of MMU TLB entry */
	/* Offset of shm_base_virt from tlb_base_virt */
	u32 ul_shm_offset_virt;
	s32 itmp_entry_ndx = 0;
	u32 ul_bios_gp_timer;
	u32 ul_load_monitor_timer;
	ul_tlb_base_virt = dev_context->atlb_entry[0].dsp_va;
	ul_shm_offset_virt =
	    ul_shm_base_virt - (ul_tlb_base_virt * DSPWORDSIZE);
	/* Kernel logical address */
	ul_shm_base = dev_context->atlb_entry[0].gpp_va + ul_shm_offset_virt;
	/* SHM physical sync address */
	shm_sync_pa = dev_context->atlb_entry[0].gpp_pa + ul_shm_offset_virt +
			SHMSYNCOFFSET;
	if ((ul_shm_base_virt == 0) || (ul_shm_base == 0)) {
		pr_err("%s: Illegal SM base\n", __func__);
480 "MMU %d, pa: 0x%x, va: 0x%x, size: 0x%x",
		temp = (temp & 0xFFFFFFEF) | 0x11;
					&ul_load_monitor_timer);
		if (ul_load_monitor_timer != 0xFFFF) {
			    ul_load_monitor_timer;
			dev_dbg(bridge,
				"Not able to get the symbol for Load Monitor Timer\n");
		if (ul_bios_gp_timer != 0xFFFF) {
				"Not able to get the symbol for BIOS Timer\n");
547 "_BRIDGEINIT_DSP_FREQ", &ul_dsp_clk_addr);
		if ((unsigned int *)ul_dsp_clk_addr != NULL) {
			dev_dbg(bridge, "%s: DSP clock rate (KHZ): 0x%x\n",
				__func__, ul_dsp_clk_rate);
			(void)bridge_brd_write(dev_context,
					       (u8 *)&ul_dsp_clk_rate,
					       ul_dsp_clk_addr,
					       sizeof(u32), 0);
	if (IS_ERR(dev_context->mbox)) {
		pr_err("%s: Failed to get dsp mailbox handle\n", __func__);
	temp = (temp & 0xFFFFFF30) | 0xC0;
	temp = (temp & 0xFFFFFF3F);
	temp = (temp & 0xFFFFFFFB) | 0x04;
	dev_dbg(bridge, "%s Unreset\n", __func__);
	dev_dbg(bridge, "Waiting for Sync @ 0x%x\n", *(u32 *)sync_addr);
	dev_dbg(bridge, "DSP c_int00 Address = 0x%x\n", dsp_addr);
	if (dev_context->mbox) {
		/* Disable the mbox interrupt */
		omap_mbox_disable_irq(dev_context->mbox, IRQ_RX);
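/*
 *  ======== bridge_brd_write ========
 *      Copies the buffers to DSP internal or external memory.
 */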
				   u8 *host_buff, u32 dsp_addr,
				   u32 ul_num_bytes, u32 mem_type)
	if (dsp_addr < dev_context->dsp_start_add) {
					ul_num_bytes, mem_type);
					    ul_num_bytes, mem_type, false);
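/*
 *  ======== bridge_dev_create ========
 *      Creates a driver object. Puts DSP in self loop.
 */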
		dev_context->atlb_entry[entry_ndx].dsp_va = 0;
	if (pt_attrs != NULL) {
		align_size = pt_attrs->l1_size;
						     align_size, &pg_tbl_pa);
		if ((pg_tbl_pa) & (align_size - 1)) {
						     align_size, &pg_tbl_pa);
			     (align_size - 1)) & (~(align_size - 1));
			    pg_tbl_va + (pt_attrs->l1_base_pa - pg_tbl_pa);
						     align_size, &pg_tbl_pa);
			"L1 pa %x, va %x, size %x\n L2 pa %x, va "
		dev_dbg(bridge, "pt_attrs %p L2 NumPages %x pg_info %p\n",
	dev_context->dev_obj = hdev_obj;
		bridge_brd_stop(dev_context);
	*dev_cntxt = dev_context;
		if (pt_attrs != NULL) {
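/*
 *  ======== bridge_dev_ctrl ========
 *      Receives device specific commands.
 */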
				  u32 dw_cmd, void *pargs)
		status = wake_dsp(dev_context, pargs);
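/*
 *  ======== bridge_dev_destroy ========
 *      Destroys the driver object.
 */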
	bridge_brd_stop(dev_context);
	if (shm_size >= 0x10000) {
			dev_dbg(bridge, "%s: Error getting shm size "
				"from registry: %x. Not calling "
				"mem_free_phys_mem\n", __func__,
	kfree((void *)dev_ctxt);
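/*
 *  ======== bridge_brd_mem_copy ========
 *      Copies DSP memory to DSP memory in BUFFERSIZE chunks.
 */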
				   u32 dsp_dest_addr, u32 dsp_src_addr,
				   u32 ul_num_bytes, u32 mem_type)
	while (total_bytes > 0 && !status) {
					copy_bytes, mem_type);
						 dest_addr, copy_bytes,
						 dest_addr, copy_bytes,
		total_bytes -= copy_bytes;
		src_addr += copy_bytes;
		dest_addr += copy_bytes;
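/* Mem Write does not halt the DSP to write unlike bridge_brd_write */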
				    u8 *host_buff, u32 dsp_addr,
				    u32 ul_num_bytes, u32 mem_type)
	u32 ul_remain_bytes = 0;
	ul_remain_bytes = ul_num_bytes;
	while (ul_remain_bytes > 0 && !status) {
					ul_bytes, mem_type);
		ul_remain_bytes -= ul_bytes;
		dsp_addr += ul_bytes;
		host_buff = host_buff + ul_bytes;
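/*
 *  ======== bridge_brd_mem_map ========
 *      This function maps MPU buffer to the DSP address space. It performs
 *      linear to physical address translation if required. It translates each
 *      page since linear addresses can be physically non-contiguous.
 */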
			      u32 ul_mpu_addr, u32 virt_addr,
			      u32 ul_num_bytes, u32 ul_map_attr,
			      struct page **mapped_pages)
	u32 num_usr_pgs = 0;
	struct page *mapped_page, *pg;
1165 "%s hDevCtxt %p, pa %x, va %x, size %x, ul_map_attr %x\n",
1166 __func__, dev_ctxt, ul_mpu_addr, virt_addr, ul_num_bytes,
1168 if (ul_num_bytes == 0)
1172 attrs = ul_map_attr;
	if (hw_attrs.mixed_size == 0) {
		hw_attrs.donotlockmpupage = 1;
		hw_attrs.donotlockmpupage = 0;
		return mem_map_vmalloc(dev_ctxt, ul_mpu_addr, virt_addr,
				       ul_num_bytes, &hw_attrs);
		status = pte_update(dev_context, ul_mpu_addr, virt_addr,
				    ul_num_bytes, &hw_attrs);
1236 "VMAfor UserBuf: ul_mpu_addr=%x, ul_num_bytes=%x, "
1237 "vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr,
	while ((vma) && (ul_mpu_addr + ul_num_bytes > vma->vm_end)) {
1250 "VMA for UserBuf ul_mpu_addr=%x ul_num_bytes=%x, "
1251 "vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr,
		pr_err("%s: Failed to get VMA region for 0x%x (%d)\n",
		       __func__, ul_mpu_addr, ul_num_bytes);
	mpu_addr = ul_mpu_addr;
		for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) {
			pa = user_va2_pa(mm, mpu_addr);
				pr_err("DSPBRIDGE: VM_IO mapping physical "
				       "address is invalid\n");
			if (page_count(pg) < 1) {
				pr_err("Bad page in VM_IO buffer\n");
				bad_page_dump(pa, pg);
			status = pte_set(dev_context->pt_attrs, pa,
		if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
		for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) {
						write, 1, &mapped_page, NULL);
			if (page_count(mapped_page) < 1) {
				pr_err("Bad page count after doing "
				       "get_user_pages on user buffer\n");
			status = pte_set(dev_context->pt_attrs,
				mapped_pages[pg_i] = mapped_page;
			pr_err("DSPBRIDGE: get_user_pages FAILED, "
			       "MPU addr = 0x%x, "
			       "vma->vm_flags = 0x%lx, "
			       "get_user_pages Err Value = %d, "
			       "Buffer size = 0x%x\n", ul_mpu_addr,
			       vma->vm_flags, pg_num, ul_num_bytes);
		bridge_brd_mem_un_map(dev_context, virt_addr,
	flush_all(dev_context);
	dev_dbg(bridge, "%s status %x\n", __func__, status);
				     u32 virt_addr, u32 ul_num_bytes)
	u32 pte_addr_l2 = 0;
	u32 numof4k_pages = 0;
	va_curr = virt_addr;
	rem_bytes = ul_num_bytes;
	pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
	dev_dbg(bridge, "%s dev_ctxt %p, va %x, NumBytes %x l1_base_va %x, "
		"pte_addr_l1 %x\n", __func__, dev_ctxt, virt_addr,
		ul_num_bytes, l1_base_va, pte_addr_l1);
	while (rem_bytes && !status) {
		u32 va_curr_orig = va_curr;
		pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
		pte_val = *(u32 *)pte_addr_l1;
		pte_size = hw_mmu_pte_size_l1(pte_val);
			goto skip_coarse_page;
		l2_base_pa = hw_mmu_pte_coarse_l1(pte_val);
		pte_addr_l2 = hw_mmu_pte_addr_l2(l2_base_va, va_curr);
		if (rem_bytes < (pte_count * PG_SIZE4K))
			pte_count = rem_bytes / PG_SIZE4K;
		while (rem_bytes_l2 && !status) {
			pte_val = *(u32 *)pte_addr_l2;
			pte_size = hw_mmu_pte_size_l2(pte_val);
			if (pte_size == 0 || rem_bytes_l2 < pte_size ||
			    va_curr & (pte_size - 1)) {
			paddr = (pte_val & ~(pte_size - 1));
			while (temp++ < numof4k_pages) {
				if (page_count(pg) < 1) {
					pr_info("DSPBRIDGE: UNMAP function: "
						"COUNT 0 FOR PA 0x%x, size = "
						"0x%x\n", paddr, ul_num_bytes);
					bad_page_dump(paddr, pg);
			rem_bytes_l2 -= pte_size;
			va_curr += pte_size;
			pte_addr_l2 += (pte_size >> 12) * sizeof(u32);
		if (rem_bytes_l2 == 0) {
			pt->pg_info[l2_page_num].num_entries -= pte_count;
			if (pt->pg_info[l2_page_num].num_entries == 0) {
		if (pte_size == 0 || rem_bytes < pte_size ||
		    va_curr & (pte_size - 1)) {
			numof4k_pages = 256;
			numof4k_pages = 4096;
		paddr = (pte_val & ~(pte_size - 1));
		while (temp++ < numof4k_pages) {
			if (page_count(pg) < 1) {
				pr_info("DSPBRIDGE: UNMAP function: "
					"COUNT 0 FOR PA 0x%x, size = "
					"0x%x\n", paddr, ul_num_bytes);
				bad_page_dump(paddr, pg);
		rem_bytes -= pte_size;
		va_curr += pte_size;
	flush_all(dev_context);
	dev_dbg(bridge,
		"%s: va_curr %x, pte_addr_l1 %x pte_addr_l2 %x rem_bytes %x, "
		"rem_bytes_l2 %x status %x\n", __func__, va_curr, pte_addr_l1,
		pte_addr_l2, rem_bytes, rem_bytes_l2, status);
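/*
 *  ======== pte_update ========
 *      This function calculates the optimum page-aligned addresses and sizes.
 *      Caller must pass page-aligned values.
 */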
	while (num_bytes && !status) {
		/* Find the max page size with which both PA & VA are aligned */
		all_bits = pa_curr | va_curr;
		for (i = 0; i < 4; i++) {
			if ((num_bytes >= page_size[i]) && ((all_bits &
				    pte_set(dev_context->pt_attrs, pa_curr,
					    va_curr, page_size[i], map_attrs);
				pa_curr += page_size[i];
				va_curr += page_size[i];
				num_bytes -= page_size[i];
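/*
 *  ======== pte_set ========
 *      This function calculates the PTE address (MPU virtual) to be updated.
 *      It also manages the L2 page tables.
 */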
	u32 l2_page_num = 0;
	pg_tbl_va = l1_base_va;
	pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va);
		pte_val = *(u32 *)pte_addr_l1;
		pte_size = hw_mmu_pte_size_l1(pte_val);
			l2_base_pa = hw_mmu_pte_coarse_l1(pte_val);
		} else if (pte_size == 0) {
			     (pt->pg_info[i].num_entries != 0); i++)
			if (i < pt->l2_num_pages) {
		pg_tbl_va = l2_base_va;
			pt->pg_info[l2_page_num].num_entries += 16;
			pt->pg_info[l2_page_num].num_entries++;
		dev_dbg(bridge, "PTE: L2 BaseVa %x, BasePa %x, PageNum "
			"%x, num_entries %x\n", l2_base_va,
			l2_base_pa, l2_page_num,
			pt->pg_info[l2_page_num].num_entries);
	dev_dbg(bridge, "PTE: pg_tbl_va %x, pa %x, va %x, size %x\n",
		pg_tbl_va, pa, va, size);
	dev_dbg(bridge, "PTE: endianism %x, element_size %x, "
			   u32 ul_mpu_addr, u32 virt_addr,
	va_curr = ul_mpu_addr;
	while (!status && (i < num_pages)) {
		while (++i < num_pages) {
		if (pa_next == (pa_curr + size_curr))
		while (temp++ < num_of4k_pages) {
		status = pte_update(dev_context, pa_curr, virt_addr +
				    (va_curr - ul_mpu_addr), size_curr,
		va_curr += size_curr;
	flush_all(dev_context);
	dev_dbg(bridge, "%s status %x\n", __func__, status);
		pr_err("%s: Timed out waiting DSP to Start\n", __func__);