#include <linux/types.h>
#include <linux/list.h>
#define OUTPUTNOTREADY 0xffff
#define NOTENABLED 0xffff
#define EXTEND "_EXT_END"
#define SWAP_WORD(x) (x)
#define UL_PAGE_ALIGN_SIZE 0x10000
#define MAX_PM_REQS 32
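/* Magic words that the DSP exception handler writes at the head of the
 * MMU fault debug dump so the host can tell a valid dump is present. */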
#define MMU_FAULT_HEAD1 0xa5a5a5a5
#define MMU_FAULT_HEAD2 0x96969696
#define MAX_MMU_DBGBUFF 10240
#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE)
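	/* DSP trace buffer bookkeeping: begin/end/current are buffer
	 * addresses, gpp_read_pointer tracks how far the host has read. */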
	u32 trace_buffer_begin;
	u32 trace_buffer_end;
	u32 trace_buffer_current;
	u32 gpp_read_pointer;
static void io_dispatch_pm(struct io_mgr *pio_mgr);
static void notify_chnl_complete(struct chnl_object *pchnl,
				 struct chnl_irp *chnl_packet_obj);
static void input_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr);
static void output_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr);
static u32 find_ready_output(struct chnl_mgr *chnl_mgr_obj,
			     struct chnl_object *pchnl, u32 mask);
static int register_shm_segs(struct io_mgr *hio_mgr,
			     struct cod_manager *cod_man,
			     u32 dw_gpp_base_pa);
static inline void set_chnl_free(struct shm *sm, u32 chnl)
static inline void set_chnl_busy(struct shm *sm, u32 chnl)
	struct cfg_devnode *dev_node_obj;
	if (!io_man || !mgr_attrts || mgr_attrts->word_size == 0)
	if (!hchnl_mgr || hchnl_mgr->iomgr)
	if (!hbridge_context)
	pio_mgr->dpc_req = 0;
	pio_mgr->dpc_sched = 0;
	hchnl_mgr->iomgr = pio_mgr;
#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE)
#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
	return ERR_PTR(status);
	u32 mem_sz, msg_sz, pad_sz, shm_sz, shm_base_offs;
	u32 seg0_sz, seg1_sz;
	u32 pa_curr, va_curr, da_curr;
	if (!hchnl_mgr || !hmsg_mgr)
	dev_dbg(bridge, "%s: (proc)proccopy shmmem size: 0x%x bytes\n",
		__func__, shm_sz - sizeof(struct shm));
	mem_sz = shm_sz + msg_sz;
	if (num_procs != 1) {
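	/* Round the segment sizes up: seg1 to a 4 KiB page boundary and
	 * seg0 to a 64 KiB boundary. */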
	seg1_sz = (seg1_sz + 0xFFF) & (~0xFFFUL);
	seg0_sz = (seg0_sz + 0xFFFF) & (~0xFFFFUL);
	dev_dbg(bridge, "%s: pa %x, va %x, da %x\n", __func__, pa, va, da);
		"shm0_end %x, dyn_ext %x, ext_end %x, seg0_sz %x seg1_sz %x\n",
	if ((seg0_sz + seg1_sz + pad_sz) > cfg_res->mem_length[1]) {
		pr_err("%s: shm Error, reserved 0x%x required 0x%x\n",
		       seg0_sz + seg1_sz + pad_sz);
	all_bits = pa_curr | va_curr;
		"seg all_bits %x, pa_curr %x, va_curr %x, bytes %x\n",
		all_bits, pa_curr, va_curr, bytes);
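	/* Map using the largest MMU page size whose alignment matches the
	 * combined physical/virtual address bits and that still fits in the
	 * remaining byte count. */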
	for (i = 0; i < 4; i++) {
		if ((bytes >= page_size[i]) &&
		    ((all_bits & (page_size[i] - 1)) == 0)) {
			status = hio_mgr->intf_fxns->brd_mem_map(dc,
					page_size[i], map_attrs,
			pa_curr += page_size[i];
			va_curr += page_size[i];
			da_curr += page_size[i];
			bytes -= page_size[i];
	all_bits = pa_curr | va_curr;
		"seg1 all_bits %x, pa_curr %x, va_curr %x, bytes %x\n",
		all_bits, pa_curr, va_curr, bytes);
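	/* Same largest-fit page selection for the next segment; each mapping
	 * is also recorded in eproc[] as translation info. */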
	for (i = 0; i < 4; i++) {
		if (!(bytes >= page_size[i]) ||
		    !((all_bits & (page_size[i] - 1)) == 0))
		status = hio_mgr->intf_fxns->brd_mem_map(dc,
				page_size[i], map_attrs,
			"PTE pa %x va %x dsp_va %x sz %x\n",
		eproc[ndx].gpp_pa = pa_curr;
		eproc[ndx].gpp_va = da_curr;
		eproc[ndx].size = page_size[i];
			__func__, eproc[ndx].gpp_pa,
		pa_curr += page_size[i];
		va_curr += page_size[i];
		da_curr += page_size[i];
		bytes -= page_size[i];
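	/* Make sure none of the TLB entries configured in the CDB overlap
	 * the shared-memory range that was just mapped. */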
	if (ep->ty_tlb[i].gpp_phys == 0)
	if ((ep->ty_tlb[i].gpp_phys > pa - 0x100000 &&
	     ep->ty_tlb[i].gpp_phys <= pa + seg0_sz) ||
	    (ep->ty_tlb[i].dsp_virt > da - 0x100000 / word_sz &&
	     ep->ty_tlb[i].dsp_virt <= da + seg0_sz / word_sz)) {
			"err cdb%d pa %x da %x shm pa %x da %x sz %x\n",
			i, ep->ty_tlb[i].gpp_phys,
			ep->ty_tlb[i].dsp_virt, pa, da, seg0_sz);
	status = hio_mgr->intf_fxns->brd_mem_map(dc,
			0x100000, map_attrs, NULL);
	eproc[ndx].size = 0x100000;
		eproc[ndx].gpp_pa, eproc[ndx].dsp_va);
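	/* Map every entry of the L4 peripheral table; the table ends with a
	 * zero phys_addr sentinel. */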
	while (l4_peripheral_table[i].phys_addr) {
		status = hio_mgr->intf_fxns->brd_mem_map(dc,
				l4_peripheral_table[i].phys_addr,
				l4_peripheral_table[i].dsp_virt_addr,
		(va + seg1_sz + pad_sz);
	status = register_shm_segs(hio_mgr, cod_man, eproc[0].gpp_pa);
		sizeof(struct shm)) / 2;
	dev_dbg(bridge, "IO MGR shm details: shared_mem %p, input %p, "
		"output %p, msg_input_ctrl %p, msg_input %p, "
		"msg_output_ctrl %p, msg_output %p\n",
#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE)
			 &hio_mgr->trace_buffer_begin);
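	/* The trace buffer symbols are DSP addresses; convert them into GPP
	 * virtual addresses inside the mapped shared segment. */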
	hio_mgr->gpp_read_pointer = hio_mgr->trace_buffer_begin =
		(va + seg1_sz + pad_sz) + (hio_mgr->trace_buffer_begin - da);
			 &hio_mgr->trace_buffer_end);
	hio_mgr->trace_buffer_end =
		(va + seg1_sz + pad_sz) + (hio_mgr->trace_buffer_end - da);
			 &hio_mgr->trace_buffer_current);
	hio_mgr->trace_buffer_current =
		(va + seg1_sz + pad_sz) + (hio_mgr->trace_buffer_current - da);
	hio_mgr->msg = kmalloc(((hio_mgr->trace_buffer_end -
				 hio_mgr->trace_buffer_begin) *
	hio_mgr->dsp_va = da;
	hio_mgr->gpp_va = (va + seg1_sz + pad_sz);
	set_chnl_free(sm, chnl);
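/*
 * io_dispatch_pm - Handle the power management requests the DSP posts in
 * shared memory: hibernate commands, OPP change requests and peripheral
 * clock control.
 */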
static void io_dispatch_pm(struct io_mgr *pio_mgr)
		dev_dbg(bridge, "PM: Hibernate command\n");
			pr_err("%s: hibernate cmd failed 0x%x\n",
		parg[1] = pio_mgr->shared_mem->opp_request.rqst_opp_pt;
		dev_dbg(bridge, "PM: Requested OPP = 0x%x\n", parg[1]);
			dev_dbg(bridge, "PM: Failed to set constraint "
				"= 0x%x\n", parg[1]);
		dev_dbg(bridge, "PM: clk control value of msg = 0x%x\n",
			dev_dbg(bridge, "PM: Failed to ctrl the DSP clk"
		if (serviced == requested)
#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
		print_dsp_debug_trace(pio_mgr);
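		/* Service both directions of the shared-memory message
		 * transport. */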
		input_msg(pio_mgr, msg_mgr_obj);
		output_msg(pio_mgr, msg_mgr_obj);
#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
		print_dsp_debug_trace(pio_mgr);
	} while (serviced != requested);
		io_dispatch_pm(pio_mgr);
	spin_unlock_irqrestore(&pio_mgr->dpc_lock, flags);
		     u8 io_mode, u16 *mbx_val)
	if (!pchnl || !mbx_val)
	chnl_mgr_obj = io_manager->chnl_mgr;
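		/* Input request: tell the DSP a host buffer is available on
		 * this channel. */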
		set_chnl_busy(sm, pchnl->chnl_id);
	spin_unlock_irqrestore(&io_manager->dpc_lock, flags);
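/*
 * find_ready_output - Scan the output channels round-robin, starting after
 * the last channel serviced, until one with output ready is found or the
 * scan wraps back to start_id.
 */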
static u32 find_ready_output(struct chnl_mgr *chnl_mgr_obj,
			     struct chnl_object *pchnl, u32 mask)
	} while (id != start_id);
	bool clear_chnl = false;
	bool notify_client = false;
	pchnl = chnl_mgr_obj->channels[chnl_id];
		       pio_mgr->input, bytes);
	chnl_packet_obj->arg = dw_arg;
		chnl_packet_obj->status |=
		set_chnl_free(sm, pchnl->chnl_id);
	notify_client = true;
	if (notify_client) {
		notify_chnl_complete(pchnl, chnl_packet_obj);
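/*
 * input_msg - Pull the messages the DSP has queued in the shared-memory
 * input control block and deliver them to the owning message queues.
 */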
static void input_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr)
	num_msgs = msg_ctr_obj->size;
	for (i = 0; i < num_msgs; i++) {
		dev_dbg(bridge, "input msg: cmd=0x%x arg1=0x%x "
			"arg2=0x%x msgq_id=0x%x\n", msg.msg.cmd,
			msg.msg.arg1, msg.msg.arg2, msg.msgq_id);
			pr_err("%s: no free msg frames,"
			       " discarding msg\n",
		ntfy_notify(msg_queue_obj->ntfy_obj,
static void notify_chnl_complete(struct chnl_object *pchnl,
				 struct chnl_irp *chnl_packet_obj)
	if (!pchnl || !pchnl->sync_event || !chnl_packet_obj)
		    find_ready_output(chnl_mgr_obj, pchnl,
	pchnl = chnl_mgr_obj->channels[chnl_id];
	sm->arg = chnl_packet_obj->arg;
#if _CHNL_WORDSIZE == 2
		notify_chnl_complete(pchnl, chnl_packet_obj);
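/*
 * output_msg - Copy queued GPP-to-DSP messages into the shared-memory
 * output control block for the DSP to consume.
 */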
static void output_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr)
	for (i = 0; i < num_msgs; i++) {
		addr = (u32) &msg_output->msg.cmd;
		addr = (u32) &msg_output->msg.arg1;
		addr = (u32) &msg_output->msg.arg2;
#if _CHNL_WORDSIZE == 2
	msg_ctr_obj->size = (u16) num_msgs;
	msg_ctr_obj->size = num_msgs;
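/*
 * register_shm_segs - Look up the shared-memory segment symbols in the
 * loaded DSP image and register the resulting GPP/DSP address ranges with
 * the memory manager.
 */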
static int register_shm_segs(struct io_mgr *hio_mgr,
			     struct cod_manager *cod_man,
			     u32 dw_gpp_base_pa)
	u32 ul_shm0_base = 0;
	u32 ul_shm0_rsrvd_start = 0;
	u32 ul_rsrvd_size = 0;
	u32 ul_shm_seg_id0 = 0;
	u32 dw_offset, dw_gpp_base_va, ul_dsp_size;
	if (ul_shm0_base == 0) {
	if (shm0_end == 0) {
				   &ul_shm0_rsrvd_start);
	if (ul_shm0_rsrvd_start == 0) {
	if (!status && (shm0_end - ul_shm0_base) > 0) {
			(shm0_end - ul_shm0_rsrvd_start + 1) * hio_mgr->word_size;
		if (ul_rsrvd_size <= 0) {
			(ul_shm0_rsrvd_start - ul_shm0_base) * hio_mgr->word_size;
		if (ul_dsp_size <= 0) {
		if (dw_gpp_base_pa > ul_dsp_virt)
			dw_offset = dw_gpp_base_pa - ul_dsp_virt;
			dw_offset = ul_dsp_virt - dw_gpp_base_pa;
		if (ul_shm0_rsrvd_start * hio_mgr->word_size < ul_dsp_virt) {
			ul_gpp_phys + ul_shm0_rsrvd_start * hio_mgr->word_size -
			dw_gpp_base_pa + ul_shm0_rsrvd_start * hio_mgr->word_size -
			ul_rsrvd_size, dw_offset,
				(u32) (ul_shm0_base *
				ul_dsp_size, &ul_shm_seg_id0,
		if (ul_shm_seg_id0 != 1)
#ifdef CONFIG_TIDSPBRIDGE_DVFS
	struct dspbridge_platform_data *pdata =
	hio_mgr->shared_mem->opp_table_struct.curr_opp_pt =
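	/* Publish the platform OPP table to the DSP through shared memory. */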
	for (i = 0; i <= dsp_max_opps; i++) {
		hio_mgr->shared_mem->opp_table_struct.opp_point[i].
		hio_mgr->shared_mem->opp_table_struct.opp_point[i].
		hio_mgr->shared_mem->opp_table_struct.opp_point[i].
	hio_mgr->shared_mem->opp_table_struct.num_opp_pts =
	dev_dbg(bridge, "OPP-shm: max OPP number: %d\n", dsp_max_opps);
	if (pdata->dsp_get_opp)
		i = (*pdata->dsp_get_opp) ();
	hio_mgr->shared_mem->opp_table_struct.curr_opp_pt = i;
	*(u32 *) pargs = hio_mgr->shared_mem->opp_request.rqst_opp_pt;
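	/* Snapshot the DSP load monitor data (current/predicted load and
	 * frequency) published in shared memory. */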
		hio_mgr->shared_mem->load_mon_info.curr_dsp_load;
		hio_mgr->shared_mem->load_mon_info.pred_dsp_load;
		hio_mgr->shared_mem->load_mon_info.curr_dsp_freq;
		hio_mgr->shared_mem->load_mon_info.pred_dsp_freq;
	dev_dbg(bridge, "Curr Load = %d, Pred Load = %d, Curr Freq = %d, "
		"Pred Freq = %d\n", proc_lstat->curr_load,
#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE)
void print_dsp_debug_trace(struct io_mgr *hio_mgr)
	u32 ul_new_message_length = 0, ul_gpp_cur_pointer;
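		/* The DSP keeps its current trace write pointer at
		 * trace_buffer_current; read it and translate it into a GPP
		 * virtual address before comparing with gpp_read_pointer. */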
		ul_gpp_cur_pointer =
			*(u32 *) (hio_mgr->trace_buffer_current);
		ul_gpp_cur_pointer =
			hio_mgr->gpp_va + (ul_gpp_cur_pointer -
		if (ul_gpp_cur_pointer == hio_mgr->gpp_read_pointer) {
		} else if (ul_gpp_cur_pointer > hio_mgr->gpp_read_pointer) {
			ul_new_message_length =
				ul_gpp_cur_pointer - hio_mgr->gpp_read_pointer;
			       (char *)hio_mgr->gpp_read_pointer,
			       ul_new_message_length);
			hio_mgr->msg[ul_new_message_length] = '\0';
			hio_mgr->gpp_read_pointer += ul_new_message_length;
			pr_info("DSPTrace: %s\n", hio_mgr->msg);
		} else if (ul_gpp_cur_pointer < hio_mgr->gpp_read_pointer) {
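			/* The write pointer has wrapped: copy the tail of
			 * the ring (read pointer to buffer end) first, then
			 * the head up to the new write position. */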
			       (char *)hio_mgr->gpp_read_pointer,
			       hio_mgr->trace_buffer_end -
			       hio_mgr->gpp_read_pointer);
			ul_new_message_length =
				ul_gpp_cur_pointer - hio_mgr->trace_buffer_begin;
			memcpy(&hio_mgr->msg[hio_mgr->trace_buffer_end -
					     hio_mgr->gpp_read_pointer],
			       (char *)hio_mgr->trace_buffer_begin,
			       ul_new_message_length);
			hio_mgr->msg[hio_mgr->trace_buffer_end -
				     hio_mgr->gpp_read_pointer +
				     ul_new_message_length] = '\0';
			hio_mgr->gpp_read_pointer =
				hio_mgr->trace_buffer_begin +
				ul_new_message_length;
			pr_info("DSPTrace: %s\n", hio_mgr->msg);
#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
	u32 ul_num_bytes = 0;
	u32 ul_num_words = 0;
	u32 ul_word_size = 2;
	ul_num_bytes = (ul_trace_end - ul_trace_begin);
	ul_num_words = ul_num_bytes * ul_word_size;
	psz_buf = kzalloc(ul_num_bytes + 2, GFP_ATOMIC);
	if (psz_buf != NULL) {
		status = (*intf_fxns->brd_read)(pbridge_context,
				(u8 *)psz_buf, (u32)ul_trace_begin,
				"before pack and unpack.\n");
		pr_debug("%s: DSP Trace Buffer Begin:\n"
			 "=======================\n%s\n",
		status = (*intf_fxns->brd_read)(pbridge_context,
				(u8 *)&trace_cur_pos, (u32)trace_cur_pos,
		pr_info("DSP Trace Buffer Begin:\n"
			"=======================\n%s\n",
		trace_cur_pos = trace_cur_pos - ul_trace_begin;
		buf_end = &psz_buf[ul_num_bytes + 1];
		trace_end = &psz_buf[trace_cur_pos];
		ul_num_bytes = buf_end - str_beg;
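		/* Print from the DSP's current write position to the end of
		 * the buffer, one newline-terminated chunk at a time. */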
		while (str_beg < buf_end) {
			new_line = strnchr(str_beg, ul_num_bytes,
			if (new_line && new_line < buf_end) {
				str_beg = ++new_line;
				ul_num_bytes = buf_end - str_beg;
		if (*str_beg != '\0') {
			str_beg[ul_num_bytes] = 0;
		ul_num_bytes = trace_end - str_beg;
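		/* Then print from the start of the buffer up to the current
		 * write position. */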
		while (str_beg < trace_end) {
			new_line = strnchr(str_beg, ul_num_bytes, '\n');
			if (new_line != NULL && new_line < trace_end) {
				str_beg = ++new_line;
				ul_num_bytes = trace_end - str_beg;
		if (*str_beg != '\0') {
			str_beg[ul_num_bytes] = 0;
		pr_info("\n=======================\n"
			"DSP Trace Buffer End:\n");
		dev_dbg(bridge, "%s Failed, status 0x%x\n", __func__, status);
} mmu_fault_dbg_info;
	const char *dsp_regs[] = {"EFR", "IERR", "ITSR", "NTSR",
				  "IRP", "NRP", "AMR", "SSR",
				  "ILC", "RILC", "IER", "CSR"};
	const char *exec_ctxt[] = {"Task", "SWI", "HWI", "Unknown"};
		pr_debug("%s: Failed on dev_get_cod_mgr.\n", __func__);
		pr_debug("%s: Failed on dev_get_node_manager.\n",
		pr_debug("%s: trace_begin Value 0x%x\n",
			 __func__, trace_begin);
		pr_debug("%s: Failed on cod_get_sym_value.\n",
	mmu_fault_dbg_info.head[0] = 0;
	mmu_fault_dbg_info.head[1] = 0;
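	/* Read the fault dump header from the trace buffer; the DSP marks a
	 * valid dump with the MMU_FAULT_HEAD1/2 magic words. */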
	status = (*intf_fxns->brd_read)(bridge_context,
			(u8 *)&mmu_fault_dbg_info, (u32)trace_begin,
			sizeof(mmu_fault_dbg_info), 0);
		pr_err("%s: No DSP MMU-Fault information available.\n",
	total_size = mmu_fault_dbg_info.size;
		       "allocate stack dump buffer.\n", __func__);
	buffer_end = buffer + total_size / 4;
	status = (*intf_fxns->brd_read)(bridge_context,
		pr_debug("%s: Failed to Read Trace Buffer.\n",
	pr_err("\nApproximate Crash Position:\n"
	       "--------------------------\n");
	exc_type = buffer[3];
	if ((i > dyn_ext_base) && (node_find_addr(node_mgr, i,
		0x1000, &offset_output, name) == 0))
		pr_err("0x%-8x [\"%s\" + 0x%x]\n", i, name,
		pr_err("0x%-8x [Unable to match to a symbol.]\n", i);
	pr_err("\nExecution Info:\n"
	       "---------------\n");
		pr_err("Execution context \t%s\n",
		       exec_ctxt[*buffer++]);
		pr_err("Execution context corrupt\n");
	pr_err("Task Handle\t\t0x%x\n", *buffer++);
	pr_err("Stack Pointer\t\t0x%x\n", *buffer++);
	pr_err("Stack Top\t\t0x%x\n", *buffer++);
	pr_err("Stack Bottom\t\t0x%x\n", *buffer++);
	pr_err("Stack Size\t\t0x%x\n", *buffer++);
	pr_err("Stack Size In Use\t0x%x\n", *buffer++);
	pr_err("\nCPU Registers\n"
	       "---------------\n");
	for (i = 0; i < 32; i++) {
		if (i == 4 || i == 6 || i == 8)
			pr_err("A%d 0x%-8x [Function Argument %d]\n",
			pr_err("A15 0x%-8x [Frame Pointer]\n",
			pr_err("A%d 0x%x\n", i, *buffer++);
	pr_err("\nB0 0x%x\n", *buffer++);
	pr_err("B1 0x%x\n", *buffer++);
	pr_err("B2 0x%x\n", *buffer++);
	if ((*buffer > dyn_ext_base) && (node_find_addr(node_mgr,
		*buffer, 0x1000, &offset_output, name) == 0))
		pr_err("B3 0x%-8x [Function Return Pointer:"
		       " \"%s\" + 0x%x]\n", *buffer, name,
		       *buffer - offset_output);
		pr_err("B3 0x%-8x [Function Return Pointer: "
		       "Unable to match to a symbol.]\n", *buffer);
	for (i = 4; i < 32; i++) {
		if (i == 4 || i == 6 || i == 8)
			pr_err("B%d 0x%-8x [Function Argument %d]\n",
			pr_err("B14 0x%-8x [Data Page Pointer]\n",
			pr_err("B%d 0x%x\n", i, *buffer++);
		pr_err("%s 0x%x\n", dsp_regs[i], *buffer++);
	for (i = 0; buffer < buffer_end; i++, buffer++) {
		if ((*buffer > dyn_ext_base) &&
		    (node_find_addr(node_mgr, *buffer, 0x600,
				    &offset_output, name) == 0))
			pr_err("[%d] 0x%-8x [\"%s\" + 0x%x]\n",
			       *buffer - offset_output);
			pr_err("[%d] 0x%x\n", i, *buffer);
	struct dev_object *dev_object = bridge_ctxt->dev_obj;
	u32 module_dsp_addr;
	u32 module_struct_size = 0;
		pr_debug("%s: Failed on dev_get_intf_fxns.\n", __func__);
		pr_debug("%s: Failed on dev_get_cod_mgr.\n", __func__);
		pr_debug("%s: Failed on cod_get_sym_value for _DLModules.\n",
	pr_debug("%s: _DLModules at 0x%x\n", __func__, module_dsp_addr);
	status = (*intf_fxns->brd_read)(bridge_context, (u8 *) &modules_hdr,
			(u32) module_dsp_addr, sizeof(modules_hdr), 0);
		pr_debug("%s: Failed to read modules header.\n",
	module_dsp_addr = modules_hdr.first_module;
	module_size = modules_hdr.first_module_size;
	pr_debug("%s: dll_module_header 0x%x %d\n", __func__, module_dsp_addr,
	pr_err("\nDynamically Loaded Modules:\n"
	       "---------------------------\n");
	while (module_size) {
		if (module_size > module_struct_size) {
			kfree(module_struct);
			module_struct = kzalloc(module_size + 128, GFP_ATOMIC);
			module_struct_size = module_size + 128;
			pr_debug("%s: allocated module struct %p %d\n",
				 __func__, module_struct, module_struct_size);
		status = (*intf_fxns->brd_read)(bridge_context,
				(u8 *)module_struct, module_dsp_addr, module_size, 0);
			       "%s: Failed to read dll_module struct for 0x%x.\n",
			       __func__, module_dsp_addr);
		pr_debug("%s: next module 0x%x %d, this module num sects %d\n",
			 __func__, module_dsp_addr, module_size,
		sect_str = (char *) &module_struct->
			sects[module_struct->num_sects];
		pr_err("%s\n", sect_str);
		sect_str += strlen(sect_str) + 1;
		     sect_ndx < module_struct->num_sects; sect_ndx++) {
			pr_err(" Section: 0x%x ",
			       module_struct->sects[sect_ndx].sect_load_adr);
			if (((u32) sect_str - (u32) module_struct) <
			    module_struct_size) {
				pr_err("%s\n", sect_str);
				sect_str += strlen(sect_str) + 1;
				pr_err("<string error>\n");
				pr_debug("%s: section name string address "
					 "is invalid %p\n", __func__, sect_str);
	kfree(module_struct);