#include <linux/pci.h>
#include <linux/slab.h>
((10 / 5 - 1) << 10) |
((15 / 5 + 1) << 12) |
((70 / 5 - 2) << 19) |
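/*
 * Bring the link core out of reset: power the PLL back up, re-enable the
 * core clock, then release the BCM7412 decoder by asserting and clearing
 * bcm7412_rst once its clock is running again.
 */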
static bool crystalhd_bring_out_of_rst(struct crystalhd_adp *adp)
rst_clk_cntrl.pll_pwr_dn = 0;
rst_clk_cntrl.stop_core_clk = 0;
rst_clk_cntrl.sel_alt_clk = 0;
rst_deco_cntrl.stop_bcm_7412_clk = 0;
rst_deco_cntrl.bcm7412_rst = 1;
rst_deco_cntrl.bcm7412_rst = 0;
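/*
 * Put the link back into reset: the mirror of the sequence above. Stop the
 * BCM7412 clock, halt the core clock, switch to the alternate clock source
 * and power the PLL down.
 */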
static bool crystalhd_put_in_reset(struct crystalhd_adp *adp)
rst_deco_cntrl.stop_bcm_7412_clk = 1;
rst_clk_cntrl.stop_core_clk = 1;
rst_clk_cntrl.sel_alt_clk = 1;
rst_clk_cntrl.pll_pwr_dn = 1;
static void crystalhd_disable_interrupts(struct crystalhd_adp *adp)
static void crystalhd_enable_interrupts(struct crystalhd_adp *adp)
static void crystalhd_clear_errors(struct crystalhd_adp *adp)
static void crystalhd_clear_interrupts(struct crystalhd_adp *adp)
static bool crystalhd_load_firmware_config(struct crystalhd_adp *adp)
for (i = 0; i < 100; ++i) {
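/*
 * crystalhd_start_device(): one-time link bring-up. Take the link out of
 * reset, then disable, clear and re-enable interrupts before the soft
 * reset, DRAM start and UART enable; the tear-down path repeats the
 * interrupt housekeeping and finally puts the link back into reset.
 */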
static bool crystalhd_start_device(struct crystalhd_adp *adp)
uint32_t dbg_options, glb_cntrl = 0, reg_pwrmgmt = 0;
if (!crystalhd_bring_out_of_rst(adp)) {
BCMLOG_ERR("Failed To Bring Link Out Of Reset\n");
crystalhd_disable_interrupts(adp);
crystalhd_clear_errors(adp);
crystalhd_clear_interrupts(adp);
crystalhd_enable_interrupts(adp);
crystalhd_enable_interrupts(adp);
crystalhd_soft_rst(adp);
crystalhd_start_dram(adp);
crystalhd_enable_uarts(adp);
crystalhd_disable_interrupts(adp);
crystalhd_clear_errors(adp);
crystalhd_clear_interrupts(adp);
if (!crystalhd_put_in_reset(adp))
BCMLOG_ERR("Failed to Put Link To Reset State\n");
unsigned long flags = 0;
spin_unlock_irqrestore(&hw->lock, flags);
static void crystalhd_hw_free_rx_pkt(struct crystalhd_hw *hw,
unsigned long flags = 0;
spin_unlock_irqrestore(&hw->lock, flags);
static void crystalhd_tx_desc_rel_call_back(void *context, void *data)
static void crystalhd_rx_pkt_rel_call_back(void *context, void *data)
crystalhd_hw_free_rx_pkt(hw, pkt);
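/*
 * I/O queue helpers: crystalhd_hw_delete_ioq() tears a dioq down, while
 * crystalhd_hw_create_ioq() creates one and jumps to hw_create_ioq_err on
 * failure so that a partially built set of queues can be cleaned up in one
 * place.
 */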
#define crystalhd_hw_delete_ioq(adp, q) \
crystalhd_delete_dioq(adp, q); \
static void crystalhd_hw_delete_ioqs(struct crystalhd_hw *hw)
#define crystalhd_hw_create_ioq(sts, hw, q, cb) \
sts = crystalhd_create_dioq(hw->adp, &q, cb, hw); \
if (sts != BC_STS_SUCCESS) \
goto hw_create_ioq_err; \
crystalhd_tx_desc_rel_call_back);
crystalhd_tx_desc_rel_call_back);
crystalhd_rx_pkt_rel_call_back);
crystalhd_rx_pkt_rel_call_back);
crystalhd_rx_pkt_rel_call_back);
crystalhd_hw_delete_ioqs(hw);
bool b_188_byte_pkts, uint8_t flags)
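/*
 * Coded picture buffer accounting: cpbFullness is the byte count between
 * the read and write pointers, using the wrap-around form when the reader
 * is ahead of the writer; fifoSize is whatever room remains for the caller
 * to feed in.
 */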
uint32_t cpbSize, cpbFullness, fifoSize;
} else if (b_188_byte_pkts) {
cpbSize = end - base;
cpbFullness = writep - readp;
cpbFullness = (end - base) - (readp - writep);
fifoSize = cpbSize - cpbFullness;
if (!hw || !list_id) {
BCMLOG_ERR("Find and Fetch Did not find req\n");
unsigned long flags = 0;
if (!(err_sts & err_mask))
tmp &= ~MISC1_TX_DMA_ERROR_STATUS_TX_L0_FIFO_FULL_ERRORS_MASK;
spin_unlock_irqrestore(&hw->lock, flags);
tmp = err_sts & err_mask;
unsigned long flags = 0;
if (!(err_sts & err_mask))
tmp &= ~MISC1_TX_DMA_ERROR_STATUS_TX_L1_FIFO_FULL_ERRORS_MASK;
spin_unlock_irqrestore(&hw->lock, flags);
tmp = err_sts & err_mask;
if (crystalhd_tx_list0_handler(hw, err_sts))
if (crystalhd_tx_list1_handler(hw, err_sts))
hw->stats.tx_errors++;
static void crystalhd_hw_dump_desc(struct dma_descriptor *p_dma_desc,
if (!p_dma_desc || !cnt)
for (ix = ul_desc_index; ix < (ul_desc_index + cnt); ix++) {
BCMLOG(ll, "%s[%d] Buff[%x:%x] Next:[%x:%x] XferSz:%x Intr:%x,Last:%x\n",
        ((p_dma_desc[ul_desc_index].dma_dir) ? "TDesc" : "RDesc"),
        p_dma_desc[ul_desc_index].buff_addr_high,
        p_dma_desc[ul_desc_index].buff_addr_low,
        p_dma_desc[ul_desc_index].next_desc_addr_high,
        p_dma_desc[ul_desc_index].next_desc_addr_low,
        p_dma_desc[ul_desc_index].xfer_size,
        p_dma_desc[ul_desc_index].last_rec_indicator);
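/*
 * crystalhd_hw_fill_desc() walks the request's scatter-gather list,
 * fills one dma_descriptor per element, chains each entry to the physical
 * address of the next descriptor, and checks that the accumulated length
 * never exceeds the expected transfer size (xfr_sz).
 */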
if (!ioreq || !desc || !desc_paddr_base || !xfr_sz ||
        (!sg_cnt && !ioreq->uinfo.dir_tx)) {
for (ix = 0; ix < sg_cnt; ix++) {
sg_ix = ix + sg_st_ix;
BCMLOG_ERR(" len in sg %d %d %d\n", len, sg_ix, sg_cnt);
if (sg_ix == sg_st_ix) {
addr_temp.full_addr += sg_st_off;
memset(&desc[ix], 0, sizeof(desc[ix]));
addr_temp.full_addr = desc_phy_addr + sizeof(struct dma_descriptor);
if ((count + len) > xfr_sz)
BCMLOG_ERR("inv-len(%x) Ix(%d) count:%x xfr_sz:%x sg_cnt:%d\n",
        len, ix, count, xfr_sz, sg_cnt);
crystalhd_hw_dump_desc(desc, ix, 1);
last_desc_ix = ix - 1;
memset(&desc[ix], 0, sizeof(desc[ix]));
addr_temp.full_addr = ioreq->fb_pa;
crystalhd_hw_dump_desc(desc, last_desc_ix, 1);
if (count != xfr_sz) {
BCMLOG_ERR("internal error sz curr:%x exp:%x\n", count, xfr_sz);
uint32_t sg_cnt = 0, sg_st_ix = 0, sg_st_off = 0;
if (!ioreq || !pdesc_mem || !uv_desc_index) {
if ((ioreq->uinfo.dir_tx) && (ioreq->uinfo.uv_offset)) {
desc_paddr_base = pdesc_mem->phy_addr;
if (ioreq->uinfo.dir_tx || (ioreq->uinfo.uv_offset == 0)) {
xfr_sz = ioreq->uinfo.xfr_len;
sg_cnt = ioreq->uinfo.uv_sg_ix + 1;
xfr_sz = ioreq->uinfo.uv_offset;
sts = crystalhd_hw_fill_desc(ioreq, desc, desc_paddr_base, sg_cnt,
        sg_st_ix, sg_st_off, xfr_sz);
desc_paddr_base = pdesc_mem->phy_addr +
xfr_sz = ioreq->uinfo.xfr_len - ioreq->uinfo.uv_offset;
sg_st_ix = ioreq->uinfo.uv_sg_ix;
sg_st_off = ioreq->uinfo.uv_sg_off;
sts = crystalhd_hw_fill_desc(ioreq, desc, desc_paddr_base, sg_cnt,
        sg_st_ix, sg_st_off, xfr_sz);
*uv_desc_index = sg_st_ix;
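/*
 * TX DMA engine control: starting the engine sets DMA_START_BIT; stopping
 * it disables interrupts, clears DMA_START_BIT and polls the two list-busy
 * flags (l1/l2) with a bounded count before re-enabling interrupts, logging
 * an error if the lists never go idle.
 */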
static void crystalhd_start_tx_dma_engine(struct crystalhd_hw *hw)
unsigned long flags = 0;
crystalhd_disable_interrupts(hw->adp);
dma_cntrl &= ~DMA_START_BIT;
while ((l1 || l2) && cnt) {
BCMLOG_ERR("Failed to stop TX DMA.. l1 %d, l2 %d\n", l1, l2);
crystalhd_enable_interrupts(hw->adp);
spin_unlock_irqrestore(&hw->lock, flags);
crystalhd_enable_interrupts(hw->adp);
if (r_offset == w_offset)
if (w_offset > r_offset)
BCMLOG_ERR("Invalid PIB Count (%u)\n", pib_cnt);
if (r_offset == w_offset)
n_offset = w_offset + 1;
if (r_offset == n_offset)
if (!src_pib || !dst_pib) {
dst_pib->width = src_pib->ppb.width;
dst_pib->flags = src_pib->ppb.flags;
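/*
 * crystalhd_hw_proc_pib() drains pending picture information blocks: for
 * each available entry it pops the PIB address off the hardware queue,
 * copies the fields into the receive packet's pib for the application and
 * then returns the address to the free PIB queue.
 */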
static void crystalhd_hw_proc_pib(struct crystalhd_hw *hw)
pib_cnt = crystalhd_get_pib_avail_cnt(hw);
for (cnt = 0; cnt < pib_cnt; cnt++) {
pib_addr = crystalhd_get_addr_from_pib_Q(hw);
AppPib = &rx_pkt->pib;
cpy_pib_to_app(&src_pib, AppPib);
"App PIB:%x %x %x %x %x %x %x %x %x %x\n",
rx_pkt->pib.picture_number,
rx_pkt->pib.aspect_ratio,
rx_pkt->pib.chroma_format,
rx_pkt->pib.colour_primaries,
rx_pkt->pib.frame_rate,
rx_pkt->pib.pulldown,
crystalhd_rel_addr_to_pib_Q(hw, pib_addr);
static void crystalhd_start_rx_dma_engine(struct crystalhd_hw *hw)
if (!(dma_cntrl & DMA_START_BIT)) {
if (!(dma_cntrl & DMA_START_BIT)) {
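/*
 * Stopping the RX DMA engine mirrors the TX path: clear DMA_START_BIT on
 * both list control registers if it is set, then poll the four luma/chroma
 * busy flags (l0y, l0uv, l1y, l1uv) until they drop or the retry count runs
 * out.
 */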
static void crystalhd_stop_rx_dma_engine(struct crystalhd_hw *hw)
uint32_t dma_cntrl = 0, count = 30;
uint32_t l0y = 1, l0uv = 1, l1y = 1, l1uv = 1;
if ((dma_cntrl & DMA_START_BIT)) {
dma_cntrl &= ~DMA_START_BIT;
if ((dma_cntrl & DMA_START_BIT)) {
dma_cntrl &= ~DMA_START_BIT;
while ((l0y || l0uv || l1y || l1uv) && count) {
uint32_t y_low_addr_reg, y_high_addr_reg;
uint32_t uv_low_addr_reg, uv_high_addr_reg;
unsigned long flags;
if (!hw || !rx_pkt) {
spin_unlock_irqrestore(&hw->rx_lock, flags);
spin_unlock_irqrestore(&hw->rx_lock, flags);
crystalhd_start_rx_dma_engine(hw);
desc_addr.full_addr = rx_pkt->desc_mem.phy_addr;
enum BC_STATUS sts = crystalhd_hw_prog_rxdma(hw, rx_pkt);
uint32_t y_dn_sz_reg, uv_dn_sz_reg;
static void crystalhd_hw_finalize_pause(struct crystalhd_hw *hw)
if (dma_cntrl & DMA_START_BIT) {
dma_cntrl &= ~DMA_START_BIT;
if (dma_cntrl & DMA_START_BIT) {
dma_cntrl &= ~DMA_START_BIT;
BCMLOG_ERR("Act-Q:PostIx:%x L0Sts:%x L1Sts:%x current L:%x tag:%x comp:%x\n",
crystalhd_get_dnsz(hw, list_index, &y_dw_dnsz, &uv_dw_dnsz);
rx_pkt->dio_req->uinfo.y_done_sz = y_dw_dnsz;
rx_pkt->dio_req->uinfo.uv_done_sz = uv_dw_dnsz;
return crystalhd_hw_post_cap_buff(hw, rx_pkt);
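/*
 * The per-list RX error handlers below acknowledge luma (Y) and chroma (UV)
 * underrun and FIFO-full status bits for list 0 and list 1; anything left
 * set after masking bumps hw->stats.rx_errors and the done sizes are logged
 * for debugging.
 */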
tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK;
tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK;
tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK;
tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK;
if (y_err_sts & GET_Y0_ERR_MSK) {
if (uv_err_sts & GET_UV0_ERR_MSK) {
tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK;
tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK;
tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK;
tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK;
if (y_err_sts & GET_Y1_ERR_MSK) {
if (uv_err_sts & GET_UV1_ERR_MSK) {
unsigned long flags;
uint32_t y_err_sts, uv_err_sts, y_dn_sz = 0, uv_dn_sz = 0;
ret = crystalhd_rx_list0_handler(hw, intr_sts, y_err_sts, uv_err_sts);
ret = crystalhd_rx_list1_handler(hw, intr_sts, y_err_sts, uv_err_sts);
hw->stats.rx_errors++;
crystalhd_get_dnsz(hw, i, &y_dn_sz, &uv_dn_sz);
"UV:%x Int:%x YDnSz:%x UVDnSz:%x\n",
i, hw->stats.rx_errors, y_err_sts,
uv_err_sts, intr_sts, y_dn_sz, uv_dn_sz);
spin_unlock_irqrestore(&hw->rx_lock, flags);
crystalhd_rx_pkt_done(hw, i, comp_sts);
crystalhd_hw_finalize_pause(hw);
switch (fw_cmd->cmd[0]) {
if (!(crystalhd_load_firmware_config(hw->adp))) {
rst_cntrl_reg.bcm_7412_rst = 1;
rst_cntrl_reg.bcm_7412_rst = 0;
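/*
 * Firmware download path: verify the OTP-programmed bit in the hardware
 * config, wait for the download-ready flag (BC_BIT(4)) with a timeout,
 * copy the image a word at a time, and finish with the 8-word signature,
 * which appears to be byte-swapped before it is written.
 */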
if (!adp || !buffer || !sz) {
if (!(reg_data & 0x02)) {
BCMLOG_ERR("Invalid hw config.. otp not programmed\n");
while (reg_data != BC_BIT(4)) {
BCMLOG_ERR("Firmware Download RDY Timeout.\n");
for (cnt = 0; cnt < (sz - fw_sig_len); cnt += 4) {
for (cnt = 0; cnt < 8; cnt++) {
uint32_t swapped_data = *temp_buff;
if (!hw || !fw_cmd) {
cmd_buff = fw_cmd->cmd;
res_buff = fw_cmd->rsp;
if (!cmd_buff || !res_buff) {
BCMLOG_ERR("Invalid Parameters for F/W Command\n");
} else if (rc == -EBUSY) {
} else if (rc == -EINTR) {
BCMLOG_ERR("res_buff[2] != C011_RET_SUCCESS\n");
sts = crystalhd_fw_cmd_post_proc(hw, fw_cmd);
BCMLOG_ERR("crystalhd_fw_cmd_post_proc Failed.\n");
hw->stats.num_interrupts++;
hw->stats.dev_interrupts++;
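/*
 * Decoder mailbox handling: a deco_intr value of 0xdeaddead is treated as
 * invalid/stale and ignored; otherwise BC_BIT(1) requests picture
 * information block processing, after which the RX and TX list interrupts
 * are dispatched.
 */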
if (deco_intr && (deco_intr != 0xdeaddead)) {
if (deco_intr & 0x80000000) {
if (deco_intr & BC_BIT(1))
crystalhd_hw_proc_pib(hw);
crystalhd_rx_isr(hw, intr_sts);
crystalhd_tx_isr(hw, intr_sts);
crystalhd_start_device(hw->adp);
if (!hw || !hw->adp) {
sts = crystalhd_hw_create_ioqs(hw);
rpkt->desc_mem.phy_addr = phy_addr;
crystalhd_hw_free_rx_pkt(hw, rpkt);
if (!hw || !hw->adp) {
crystalhd_hw_delete_ioqs(hw);
if (hw->tx_pkt_pool[i].desc_mem.pdma_desc_start) {
rpkt = crystalhd_hw_alloc_rx_pkt(hw);
uint32_t first_desc_u_addr, first_desc_l_addr;
unsigned long flags;
if (!hw || !ioreq || !call_back || !cb_event || !list_id) {
rc = crystalhd_code_in_full(hw->adp, ioreq->uinfo.xfr_len,
hw->stats.cin_busy++;
if (!tx_dma_packet) {
sts = crystalhd_xlat_sgl_to_dma_desc(ioreq,
tx_dma_packet->dio_req = ioreq;
spin_unlock_irqrestore(&hw->lock, flags);
crystalhd_start_tx_dma_engine(hw);
if (!hw || !list_id) {
crystalhd_stop_tx_dma_engine(hw);
if (!hw || !ioreq) {
rpkt = crystalhd_hw_alloc_rx_pkt(hw);
sts = crystalhd_xlat_sgl_to_dma_desc(ioreq, &rpkt->desc_mem, &uv_desc_ix);
sts = crystalhd_hw_post_cap_buff(hw, rpkt);
if (!hw || !ioreq || !pib) {
crystalhd_hw_free_rx_pkt(hw, rpkt);
sts = crystalhd_hw_post_cap_buff(hw, rx_pkt);
crystalhd_stop_rx_dma_engine(hw);
crystalhd_rx_pkt_rel_call_back(hw, temp);
hw->stats.pause_cnt++;
crystalhd_hw_finalize_pause(hw);
sts = crystalhd_put_ddr2sleep(hw);
BCMLOG_ERR("Failed to Put DDR To Sleep!!\n");
if (!crystalhd_stop_device(hw->adp)) {
reg |= vco_mg << 12;
for (i = 0; i < 10; i++) {
if (reg & 0x00020000) {