#define bfa_ioc_ct_sync_pos(__ioc)	\
		((uint32_t) (1 << bfa_ioc_pcifn(__ioc)))
#define BFA_IOC_SYNC_REQD_SH	16
#define bfa_ioc_ct_get_sync_ackd(__val)	(__val & 0x0000ffff)
#define bfa_ioc_ct_clear_sync_ackd(__val)	(__val & 0xffff0000)
#define bfa_ioc_ct_get_sync_reqd(__val)	(__val >> BFA_IOC_SYNC_REQD_SH)
#define bfa_ioc_ct_sync_reqd_pos(__ioc)	\
		(bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)
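/*
 * Layout of the sync word (inferred from the macros above): the 32-bit
 * value is split into two 16-bit halves -- the low half holds per-function
 * "ack" bits, the high half per-function "request" bits, one bit per PCI
 * function.  For example, for PCI function 2:
 *
 *	bfa_ioc_ct_sync_pos(ioc)      == 0x00000004   (ack bit)
 *	bfa_ioc_ct_sync_reqd_pos(ioc) == 0x00040000   (request bit)
 */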
static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc);
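/*
 * The declarations above are the CT-specific hardware-interface hooks;
 * judging by the assignments near the end of this file, they are installed
 * into a shared hwif_ct ops structure alongside ioc_reg_init, ioc_map_port
 * and ioc_isr_mode_set.
 */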
bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)

bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc)
/* Host/LPU mailbox command register offsets for port 1, indexed by PCI
 * function (layout assumed from its use in bfa_ioc_ct_reg_init() below). */
static struct { u32 hfn, lpu; } ct_p1reg[] = {
bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
	ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox;
	ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox;
	ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn;
	ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn;
	ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu;
	ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn;
	ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu;
bfa_ioc_ct2_reg_init(struct bfa_ioc_s *ioc)
	ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn;

	ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read;
#define FNC_PERS_FN_SHIFT(__fn)	((__fn) * 8)
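/*
 * Each PCI function's personality occupies an 8-bit field in the FNC_PERS
 * register, so function __fn's field starts at bit (__fn * 8); e.g.
 * FNC_PERS_FN_SHIFT(2) == 16.
 */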
bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc)

bfa_ioc_ct2_map_port(struct bfa_ioc_s *ioc)
	/* Presumably: already in the requested interrupt mode, nothing to do. */
	if ((!msix && mode) || (msix && !mode))
bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)

bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc)

	return bfa_ioc_ct_sync_complete(ioc);

bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc)

bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc)

bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc)

bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc)
	tmp_ackd = sync_ackd;

	if (sync_reqd == sync_ackd) {

	if (tmp_ackd != sync_ackd)
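/*
 * Inferred flow of bfa_ioc_ct_sync_complete() from the fragments above:
 * sync is complete once every function that requested a sync has acked
 * (sync_reqd == sync_ackd), at which point the ack bits are cleared via
 * bfa_ioc_ct_clear_sync_ackd().  If this function's ack was newly added
 * (tmp_ackd != sync_ackd), the updated sync word is written back.
 */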
	bfa_ioc_set_ctx_hwif(ioc, &hwif_ct);

	hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
	hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
	hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;

	bfa_ioc_set_ctx_hwif(ioc, &hwif_ct2);

	hwif_ct2.ioc_reg_init = bfa_ioc_ct2_reg_init;
	hwif_ct2.ioc_map_port = bfa_ioc_ct2_map_port;
	hwif_ct2.ioc_isr_mode_set = NULL;
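/*
 * Note: hwif_ct2.ioc_isr_mode_set is left NULL, presumably because CT2
 * ASICs do not need the FNC_PERS-based INTX/MSI-X switching that
 * bfa_ioc_ct_isr_mode_set performs for CT; callers would then have to
 * treat a NULL hook as "nothing to do".
 */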
#define HOSTFN_MSIX_DEFAULT		64
#define HOSTFN_MSIX_VT_INDEX_MBOX_ERR	0x30138
#define HOSTFN_MSIX_VT_OFST_NUMVT	0x3013c
#define __MSIX_VT_NUMVT__MK		0x003ff800
#define __MSIX_VT_NUMVT__SH		11
#define __MSIX_VT_NUMVT_(_v)		((_v) << __MSIX_VT_NUMVT__SH)
#define __MSIX_VT_OFST_			0x000007ff
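/*
 * The __MSIX_VT_* macros describe one 32-bit register at
 * HOSTFN_MSIX_VT_OFST_NUMVT: bits 10..0 hold a vector offset
 * (__MSIX_VT_OFST_) and bits 21..11 a vector count (__MSIX_VT_NUMVT__MK).
 * A sketch of programming HOSTFN_MSIX_DEFAULT vectors at an offset
 * ("vec_base" is an illustrative placeholder, not from this file):
 *
 *	writel(__MSIX_VT_NUMVT_(HOSTFN_MSIX_DEFAULT - 1) | vec_base,
 *	       rb + HOSTFN_MSIX_VT_OFST_NUMVT);
 */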
	u32 pll_sclk, pll_fclk, r32;
bfa_ioc_ct2_sclk_init(void __iomem *rb)

bfa_ioc_ct2_lclk_init(void __iomem *rb)

bfa_ioc_ct2_mem_init(void __iomem *rb)

bfa_ioc_ct2_enable_flash(void __iomem *rb)
#define CT2_NFC_MAX_DELAY	1000
#define CT2_NFC_PAUSE_MAX_DELAY	4000
#define CT2_NFC_VER_VALID	0x147
#define CT2_NFC_STATE_RUNNING	0x20000001
#define BFA_IOC_PLL_POLL	1000000
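/*
 * The CT2_NFC_* values govern the flash/NFC controller handshake, and
 * BFA_IOC_PLL_POLL bounds PLL status polling.  The usual bounded-poll
 * pattern, sketched with placeholder names ("status_reg", "ready_bit"
 * are illustrative, not from this file):
 *
 *	i = 0;
 *	do {
 *		r32 = readl(rb + status_reg);
 *	} while (!(r32 & ready_bit) && (i++ < BFA_IOC_PLL_POLL));
 */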
bfa_ioc_ct2_nfc_halted(void __iomem *rb)

bfa_ioc_ct2_nfc_halt(void __iomem *rb)

		if (bfa_ioc_ct2_nfc_halted(rb))

	WARN_ON(!bfa_ioc_ct2_nfc_halted(rb));
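/*
 * bfa_ioc_ct2_nfc_halt() evidently requests a halt and then polls
 * bfa_ioc_ct2_nfc_halted() up to CT2_NFC_MAX_DELAY times; the WARN_ON
 * fires if the controller never reports the halted state.
 */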
bfa_ioc_ct2_nfc_resume(void __iomem *rb)

bfa_ioc_ct2_clk_reset(void __iomem *rb)
	bfa_ioc_ct2_sclk_init(rb);
	bfa_ioc_ct2_lclk_init(rb);
bfa_ioc_ct2_nfc_clk_reset(void __iomem *rb)

bfa_ioc_ct2_wait_till_nfc_running(void __iomem *rb)
	if (bfa_ioc_ct2_nfc_halted(rb))
		bfa_ioc_ct2_nfc_resume(rb);
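/*
 * If the NFC is found halted it is resumed first; the function then
 * presumably spins until the controller reports CT2_NFC_STATE_RUNNING
 * (the poll itself is elided from this excerpt).
 */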
	u32 wgn, r32, nfc_ver;

	bfa_ioc_ct2_clk_reset(rb);
	bfa_ioc_ct2_enable_flash(rb);

	bfa_ioc_ct2_clk_reset(rb);
	bfa_ioc_ct2_enable_flash(rb);

	bfa_ioc_ct2_wait_till_nfc_running(rb);

	bfa_ioc_ct2_nfc_clk_reset(rb);

	bfa_ioc_ct2_nfc_halt(rb);

	bfa_ioc_ct2_clk_reset(rb);

	bfa_ioc_ct2_clk_reset(rb);

	bfa_ioc_ct2_mem_init(rb);
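/*
 * Rough CT2 PLL bring-up order suggested by the fragments above: reset the
 * core clocks and re-enable flash access (done twice, apparently as a
 * recovery path), then either wait for the NFC to run and reset its clock
 * or halt the NFC and reset the core clocks, before finally initializing
 * memory with bfa_ioc_ct2_mem_init().
 */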