/*
 * Per-function sync helpers for the CT IOC sync register.
 *
 * The 32-bit sync word is split into two 16-bit halves:
 *   bits 31..16 - "sync required" bitmask (one bit per PCI function)
 *   bits 15..0  - "sync acked"    bitmask
 *
 * Fix: macro parameters are now fully parenthesized (CERT PRE01-C).
 * Previously `bfa_ioc_ct_get_sync_reqd(a | b)` expanded to
 * `(a | b >> 16)`, where `>>` binds tighter than `|`, silently
 * producing the wrong value for compound arguments.
 */
#define bfa_ioc_ct_sync_pos(__ioc)	\
		((u32)(1 << bfa_ioc_pcifn(__ioc)))
#define BFA_IOC_SYNC_REQD_SH		16
#define bfa_ioc_ct_get_sync_ackd(__val)		((__val) & 0x0000ffff)
#define bfa_ioc_ct_clear_sync_ackd(__val)	((__val) & 0xffff0000)
#define bfa_ioc_ct_get_sync_reqd(__val)		((__val) >> BFA_IOC_SYNC_REQD_SH)
#define bfa_ioc_ct_sync_reqd_pos(__ioc)	\
		(bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)
/*
 * Forward declarations of the CT/CT2 hardware-interface callbacks
 * wired into the bfa_ioc_hwif ops tables below.
 */
static bool bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc);
static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc);
static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc);
static void bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc);
static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc);
static void bfa_ioc_ct2_map_port(struct bfa_ioc *ioc);
static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc);
static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
static bool bfa_ioc_ct_sync_start(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
58 .ioc_pll_init = bfa_ioc_ct_pll_init,
59 .ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
60 .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
61 .ioc_reg_init = bfa_ioc_ct_reg_init,
62 .ioc_map_port = bfa_ioc_ct_map_port,
63 .ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set,
64 .ioc_notify_fail = bfa_ioc_ct_notify_fail,
65 .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
66 .ioc_sync_start = bfa_ioc_ct_sync_start,
67 .ioc_sync_join = bfa_ioc_ct_sync_join,
68 .ioc_sync_leave = bfa_ioc_ct_sync_leave,
69 .ioc_sync_ack = bfa_ioc_ct_sync_ack,
70 .ioc_sync_complete = bfa_ioc_ct_sync_complete,
74 .ioc_pll_init = bfa_ioc_ct2_pll_init,
75 .ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
76 .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
77 .ioc_reg_init = bfa_ioc_ct2_reg_init,
78 .ioc_map_port = bfa_ioc_ct2_map_port,
80 .ioc_isr_mode_set =
NULL,
81 .ioc_notify_fail = bfa_ioc_ct_notify_fail,
82 .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
83 .ioc_sync_start = bfa_ioc_ct_sync_start,
84 .ioc_sync_join = bfa_ioc_ct_sync_join,
85 .ioc_sync_leave = bfa_ioc_ct_sync_leave,
86 .ioc_sync_ack = bfa_ioc_ct_sync_ack,
87 .ioc_sync_complete = bfa_ioc_ct_sync_complete,
157 bfa_ioc_ct_firmware_unlock(
struct bfa_ioc *ioc)
183 bfa_ioc_ct_notify_fail(
struct bfa_ioc *ioc)
193 static const struct {
205 static const struct {
216 static const struct {
226 static const struct {
243 bfa_ioc_ct_reg_init(
struct bfa_ioc *ioc)
250 ioc->
ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox;
251 ioc->
ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox;
252 ioc->
ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn;
258 ioc->
ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn;
259 ioc->
ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu;
266 ioc->
ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn;
267 ioc->
ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu;
302 bfa_ioc_ct2_reg_init(
struct bfa_ioc *ioc)
311 ioc->
ioc_regs.host_page_num_fn = rb + ct2_reg[
port].hfn_pgn;
314 ioc->
ioc_regs.lpu_read_stat = rb + ct2_reg[
port].lpu_read;
/* Bit offset of a PCI function's field in the FNC_PERS register: each
 * function occupies an 8-bit slot -- presumably the per-function
 * personality/mode bits; TODO confirm against the register layout. */
361 #define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8)
363 bfa_ioc_ct_map_port(
struct bfa_ioc *ioc)
378 bfa_ioc_ct2_map_port(
struct bfa_ioc *ioc)
389 bfa_ioc_ct_isr_mode_set(
struct bfa_ioc *ioc,
bool msix)
402 if ((!msix && mode) || (msix && !
mode))
/* MSI-X related constants for the host function. NOTE(review): the two
 * 0x301xx values look like register offsets and the remaining macros a
 * mask/shift pair -- hedged where the using code is not visible here. */
431 #define HOSTFN_MSIX_DEFAULT 64
/* presumably the MSI-X vector index reserved for mailbox errors -- verify */
432 #define HOSTFN_MSIX_VT_INDEX_MBOX_ERR 0x30138
/* presumably the register holding the number of MSI-X vectors -- verify */
433 #define HOSTFN_MSIX_VT_OFST_NUMVT 0x3013c
/* NUMVT field: 11-bit value at bit 11 (0x003ff800 == 0x7ff << 11) */
434 #define __MSIX_VT_NUMVT__MK 0x003ff800
435 #define __MSIX_VT_NUMVT__SH 11
436 #define __MSIX_VT_NUMVT_(_v) ((_v) << __MSIX_VT_NUMVT__SH)
/* low 11 bits: vector offset field mask */
437 #define __MSIX_VT_OFST_ 0x000007ff
460 bfa_ioc_ct_ownership_reset(
struct bfa_ioc *ioc)
477 bfa_ioc_ct_sync_start(
struct bfa_ioc *ioc)
497 return bfa_ioc_ct_sync_complete(ioc);
501 bfa_ioc_ct_sync_join(
struct bfa_ioc *ioc)
510 bfa_ioc_ct_sync_leave(
struct bfa_ioc *ioc)
520 bfa_ioc_ct_sync_ack(
struct bfa_ioc *ioc)
528 bfa_ioc_ct_sync_complete(
struct bfa_ioc *ioc)
544 tmp_ackd = sync_ackd;
549 if (sync_reqd == sync_ackd) {
562 if (tmp_ackd != sync_ackd)
571 u32 pll_sclk, pll_fclk, r32;
646 bfa_ioc_ct2_sclk_init(
void __iomem *rb)
698 bfa_ioc_ct2_lclk_init(
void __iomem *rb)
738 bfa_ioc_ct2_mem_init(
void __iomem *rb)
757 bfa_ioc_ct2_sclk_init(rb);
758 bfa_ioc_ct2_lclk_init(rb);
/* Poll limits for CT2 NFC halt/resume and PLL bring-up. NOTE(review):
 * these appear to be loop iteration caps, not time units -- the polling
 * loops that consume them are not visible in this view; confirm. */
781 #define CT2_NFC_MAX_DELAY 1000
/* presumably the minimum NFC firmware version considered valid -- verify */
782 #define CT2_NFC_VER_VALID 0x143
783 #define BFA_IOC_PLL_POLL 1000000
786 bfa_ioc_ct2_nfc_halted(
void __iomem *rb)
798 bfa_ioc_ct2_nfc_resume(
void __iomem *rb)
816 volatile u32 wgn, r32;
825 if (bfa_ioc_ct2_nfc_halted(rb))
826 bfa_ioc_ct2_nfc_resume(rb);
857 bfa_ioc_ct2_sclk_init(rb);
858 bfa_ioc_ct2_lclk_init(rb);
899 bfa_ioc_ct2_mem_init(rb);