30 #define BFA_IOC_TOV 3000
31 #define BFA_IOC_HWSEM_TOV 500
32 #define BFA_IOC_HB_TOV 500
33 #define BFA_IOC_TOV_RECOVER BFA_IOC_HB_TOV
34 #define BFA_IOC_POLL_TOV BFA_TIMER_FREQ
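/*
 * Timeout values above are in milliseconds. The macros below arm and
 * cancel the IOC state-machine timer and the firmware heartbeat timer.
 */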
36 #define bfa_ioc_timer_start(__ioc) \
37 bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
38 bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
39 #define bfa_ioc_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->ioc_timer)
41 #define bfa_hb_timer_start(__ioc) \
42 bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer, \
43 bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV)
44 #define bfa_hb_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->hb_timer)
46 #define BFA_DBG_FWTRC_OFF(_fn) (BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))
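/* Dispatch macros: forward to the ASIC-specific ioc_hwif callbacks. */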
52 #define bfa_ioc_firmware_lock(__ioc) \
53 ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
54 #define bfa_ioc_firmware_unlock(__ioc) \
55 ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
56 #define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
57 #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
58 #define bfa_ioc_notify_fail(__ioc) \
59 ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
60 #define bfa_ioc_sync_start(__ioc) \
61 ((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
62 #define bfa_ioc_sync_join(__ioc) \
63 ((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
64 #define bfa_ioc_sync_leave(__ioc) \
65 ((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
66 #define bfa_ioc_sync_ack(__ioc) \
67 ((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
68 #define bfa_ioc_sync_complete(__ioc) \
69 ((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
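/*
 * A mailbox command is pending if the command queue is non-empty or the
 * h/w mailbox register has not yet been consumed by firmware.
 */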
71 #define bfa_ioc_mbox_cmd_pending(__ioc) \
72 (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
73 readl((__ioc)->ioc_regs.hfn_mbox_cmd))
82 static void bfa_ioc_timeout(void *ioc);
96 static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
144 #define bfa_iocpf_timer_start(__ioc) \
145 bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
146 bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
147 #define bfa_iocpf_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->ioc_timer)
149 #define bfa_iocpf_poll_timer_start(__ioc) \
150 bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
151 bfa_iocpf_poll_timeout, (__ioc), BFA_IOC_POLL_TOV)
153 #define bfa_sem_timer_start(__ioc) \
154 bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer, \
155 bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV)
156 #define bfa_sem_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->sem_timer)
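/* IOCPF timer callbacks, defined later in this file. */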
161 static void bfa_iocpf_timeout(void *ioc_arg);
162 static void bfa_iocpf_sem_timeout(void *ioc_arg);
163 static void bfa_iocpf_poll_timeout(void *ioc_arg);
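/*
 * IOC state machine: the *_entry() handlers below run on entry to each state.
 */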
266 bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
285 bfa_ioc_disable_comp(ioc);
299 bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
351 bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
354 bfa_ioc_send_getattr(ioc);
396 bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
402 bfa_ioc_hb_monitor(ioc);
426 if (ioc->iocpf.auto_recover)
431 bfa_ioc_fail_notify(ioc);
444 bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
476 bfa_ioc_disable_comp(ioc);
488 bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
490 bfa_ioc_disable_comp(ioc);
504 ioc->cbfn->disable_cbfn(ioc->bfa);
519 bfa_ioc_sm_fail_retry_entry(struct bfa_ioc_s *ioc)
572 bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
612 bfa_ioc_sm_hwfail_entry(struct bfa_ioc_s *ioc)
628 ioc->cbfn->disable_cbfn(ioc->bfa);
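/*
 * IOCPF (IOC per-PF) state machine entry handlers.
 */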
652 bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
685 bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
688 u32 r32, fwstate, pgnum, pgoff, loff = 0;
694 r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
697 r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
701 fwstate = readl(iocpf->ioc->ioc_regs.ioc_fwstate);
703 writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
710 writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
719 writel(pgnum, iocpf->ioc->ioc_regs.host_page_num_fn);
739 writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
742 bfa_ioc_hw_sem_get(iocpf->ioc);
797 bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
803 bfa_ioc_pf_fwmismatch(iocpf->ioc);
844 bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
846 bfa_ioc_hw_sem_get(iocpf->ioc);
886 bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
927 bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
933 iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
934 bfa_ioc_send_enable(iocpf->ioc);
980 bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf)
1011 bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
1014 bfa_ioc_send_disable(iocpf->ioc);
1053 bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf_s *iocpf)
1055 bfa_ioc_hw_sem_get(iocpf->ioc);
1092 bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
1094 bfa_ioc_mbox_flush(iocpf->ioc);
1121 bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf_s *iocpf)
1124 bfa_ioc_hw_sem_get(iocpf->ioc);
1171 bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
1202 bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf_s *iocpf)
1207 bfa_ioc_lpu_stop(iocpf->ioc);
1212 bfa_ioc_mbox_flush(iocpf->ioc);
1214 bfa_ioc_hw_sem_get(iocpf->ioc);
1262 bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
1307 bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
1309 ioc->cbfn->disable_cbfn(ioc->bfa);
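/* Hardware semaphore helpers: the semaphore register is polled up to BFA_SEM_SPINCNT times. */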
1318 #define BFA_SEM_SPINCNT 3000
1320 r32 = readl(sem_reg);
1325 r32 = readl(sem_reg);
1335 bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
1361 bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
1365 #define PSS_LMEM_INIT_TIME 10000
1390 WARN_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
1398 bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
1434 u32 *fwsig = (u32 *) fwhdr;
1444 loff += sizeof(u32);
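/* Firmware header validation: the signature must match the driver's image and the boot environment must match boot_env. */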
1478 bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
1487 bfa_trc(ioc, fwhdr.signature);
1492 if (swab32(fwhdr.bootenv) != boot_env) {
1536 BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env);
1540 bfa_ioc_poll_fwinit(ioc);
1549 bfa_ioc_poll_fwinit(ioc);
1567 bfa_ioc_msgflush(ioc);
1576 bfa_ioc_poll_fwinit(ioc);
1580 bfa_ioc_timeout(void *ioc_arg)
1591 u32 *msgp = (u32 *) ioc_msg;
1602 for (i = 0; i < len / sizeof(u32); i++)
1617 bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
1631 bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
1641 bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
1652 bfa_ioc_hb_check(void *cbarg)
1659 bfa_ioc_recover(ioc);
1665 bfa_ioc_mbox_poll(ioc);
1670 bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
1680 bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
1712 loff += sizeof(u32);
1745 bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
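/* IOC mailbox queue helpers: attach initializes cmd_q, poll drains queued commands, flush discards pending ones. */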
1762 bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
1767 INIT_LIST_HEAD(&mod->cmd_q);
1778 bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
1787 if (list_empty(&mod->cmd_q))
1808 bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc)
1813 while (!list_empty(&mod->cmd_q))
1826 bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
1849 len = sz/sizeof(u32);
1851 for (i = 0; i < len; i++) {
1854 loff += sizeof(u32);
1906 len = sz/sizeof(u32);
1908 for (i = 0; i < len; i++) {
1910 loff += sizeof(u32);
1934 bfa_ioc_fail_notify(struct bfa_ioc_s *ioc)
1941 ioc->cbfn->hbfail_cbfn(ioc->bfa);
1947 "Heart Beat of IOC has failed\n");
1953 bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
1961 "Running firmware version is incompatible "
1962 "with the driver version\n");
1982 bfa_ioc_lmem_init(ioc);
2016 bfa_ioc_msgflush(ioc);
2017 bfa_ioc_download_fw(ioc, boot_type, boot_env);
2018 bfa_ioc_lpu_start(ioc);
2088 switch (msg->mh.msg_id) {
2104 bfa_ioc_getattr_reply(ioc);
2129 ioc->iocpf.ioc = ioc;
2131 bfa_ioc_mbox_attach(ioc);
2298 mod->mbhdlr[mc].cbfn = mcfuncs[mc];
2330 if (!list_empty(&mod->cmd_q)) {
2364 mc = m.mh.msg_class;
2381 bfa_ioc_mbox_poll(ioc);
2413 #define bfa_ioc_state_disabled(__sm) \
2414 (((__sm) == BFI_IOC_UNINIT) || \
2415 ((__sm) == BFI_IOC_INITING) || \
2416 ((__sm) == BFI_IOC_HWINIT) || \
2417 ((__sm) == BFI_IOC_DISABLED) || \
2418 ((__sm) == BFI_IOC_FAIL) || \
2419 ((__sm) == BFI_IOC_CFG_DISABLED))
2456 #define BFA_MFG_NAME "Brocade"
2463 ioc_attr = ioc->attr;
2521 memcpy((void *)serial_num,
2522 (void *)ioc->attr->brcd_serialnum,
2544 chip_rev[4] = ioc->attr->asic_rev;
2552 memcpy(optrom_ver, ioc->attr->optrom_version,
2571 ioc_attr = ioc->attr;
2586 iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
2644 return ioc->attr->fcoe_mac;
2646 return ioc->attr->mac;
2654 m = ioc->attr->mfg_mac;
2681 aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
2684 aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
2696 aen_entry->aen_data.ioc.ioc_type = ioc_type;
2738 status = bfa_ioc_smem_read(ioc, trcdata, loff, tlen);
2744 bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc)
2758 u32 fwsync_iter = 1000;
2760 bfa_ioc_send_fwsync(ioc);
2789 if (*offset >= smem_len) {
2790 *offset = *buflen = 0;
2802 bfa_ioc_fwsync(ioc);
2804 if ((loff + dlen) >= smem_len)
2805 dlen = smem_len - loff;
2807 status = bfa_ioc_smem_read(ioc, buf, loff, dlen);
2810 *offset = *buflen = 0;
2816 if (*offset >= smem_len)
2842 status = bfa_ioc_smem_read(ioc, stats, loff, tlen);
2863 status = bfa_ioc_smem_clr(ioc, loff, tlen);
2901 bfa_iocpf_timeout(void *ioc_arg)
2910 bfa_iocpf_sem_timeout(void *ioc_arg)
2914 bfa_ioc_hw_sem_get(ioc);
2918 bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc)
2930 bfa_iocpf_timeout(ioc);
2938 bfa_iocpf_poll_timeout(void *ioc_arg)
2942 bfa_ioc_poll_fwinit(ioc);
2956 INIT_LIST_HEAD(&timedout_q);
2978 while (!list_empty(&timedout_q)) {
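/* ASIC block (ablk): byte-swap firmware config replies and record the caller's callback argument before each request. */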
3024 cfg_inst = &cfg->inst[i];
3026 be16 = cfg_inst->pf_cfg[j].pers;
3028 be16 = cfg_inst->pf_cfg[j].num_qpairs;
3030 be16 = cfg_inst->pf_cfg[j].num_vectors;
3032 be16 = cfg_inst->pf_cfg[j].bw_min;
3034 be16 = cfg_inst->pf_cfg[j].bw_max;
3050 switch (msg->mh.msg_id) {
3055 bfa_ablk_config_swap(ablk->cfg);
3161 ablk->cfg = ablk_cfg;
3163 ablk->cbarg = cbarg;
3195 ablk->cbarg = cbarg;
3227 ablk->cbarg = cbarg;
3256 ablk->cbarg = cbarg;
3287 ablk->cbarg = cbarg;
3319 ablk->cbarg = cbarg;
3349 ablk->cbarg = cbarg;
3376 ablk->cbarg = cbarg;
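/* SFP support: fetch module data over the mailbox, post AENs on state change, derive media type and validate speed. */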
3392 static void bfa_sfp_getdata_send(struct bfa_sfp_s *sfp);
3393 static void bfa_sfp_media_get(struct bfa_sfp_s *sfp);
3408 bfa_cb_sfp_state_query(struct bfa_sfp_s *sfp)
3412 bfa_sfp_media_get(sfp);
3448 bfa_cb_sfp_show(sfp);
3453 bfa_cb_sfp_state_query(sfp);
3480 aen_entry->aen_data.port.pwwn = sfp->ioc->attr->pwwn;
3483 switch (rsp->event) {
3498 aen_entry->aen_data.port.level = rsp->pomlvl;
3514 bfa_sfp_getdata_send(struct bfa_sfp_s *sfp)
3546 bfa_sfp_getdata_send(sfp);
3557 switch (rsp->event) {
3561 bfa_sfp_scn_aen_post(sfp, rsp);
3566 bfa_sfp_scn_aen_post(sfp, rsp);
3571 bfa_sfp_scn_aen_post(sfp, rsp);
3575 bfa_sfp_scn_aen_post(sfp, rsp);
3580 bfa_sfp_scn_aen_post(sfp, rsp);
3635 bfa_cb_sfp_show(sfp);
3643 bfa_cb_sfp_state_query(sfp);
3651 bfa_sfp_state_query(struct bfa_sfp_s *sfp)
3668 bfa_sfp_media_get(struct bfa_sfp_s *sfp)
3701 else if (e10g.r.e10g_sr)
3703 else if (e10g.r.e10g_lrm && e10g.r.e10g_lr)
3705 else if (e10g.r.e10g_unall)
3722 if (e10g.r.e10g_sr || e10g.r.e10g_lr)
3751 switch (msg->mh.msg_id) {
3753 bfa_sfp_show_comp(sfp, msg);
3757 bfa_sfp_scn(sfp, msg);
3875 bfa_sfp_state_query(sfp);
3880 bfa_sfp_media_get(sfp);
3913 bfa_sfp_state_query(sfp);
3933 return bfa_sfp_speed_valid(sfp, portspeed);
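/* Flash partition access: query/read/write/erase requests go over the mailbox and complete in bfa_flash_intr(). */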
3945 #define BFA_FLASH_SEG_SZ 2048
3946 #define BFA_FLASH_DMA_BUF_SZ \
3947 BFA_ROUNDUP(0x010000 + sizeof(struct bfa_mfg_block_s), BFA_FLASH_SEG_SZ)
3960 aen_entry->aen_data.audit.pwwn = ioc->attr->pwwn;
3961 aen_entry->aen_data.audit.partition_inst = inst;
3962 aen_entry->aen_data.audit.partition_type = type;
4004 bfa_flash_query_send(void *cbarg)
4055 bfa_flash_read_send(void *cbarg)
4080 bfa_flash_erase_send(void *cbarg)
4100 bfa_flash_intr(void *flasharg, struct bfi_mbmsg_s *msg)
4123 switch (msg->mh.msg_id) {
4137 for (i = 0; i < attr->npart; i++) {
4138 attr->part[i].part_type =
4140 attr->part[i].part_instance =
4142 attr->part[i].part_off =
4144 attr->part[i].part_size =
4146 attr->part[i].part_len =
4148 attr->part[i].part_status =
4153 bfa_flash_cb(flash);
4159 bfa_flash_cb(flash);
4166 bfa_flash_cb(flash);
4169 bfa_flash_write_send(flash);
4177 bfa_flash_cb(flash);
4188 bfa_flash_cb(flash);
4190 bfa_flash_read_send(flash);
4307 flash->cbarg = cbarg;
4308 flash->ubuf = (u8 *) attr;
4309 bfa_flash_query_send(flash);
4343 flash->cbarg = cbarg;
4347 bfa_flash_erase_send(flash);
4385 if (!len || (len & 0x03) || (offset & 0x00003FFF))
4398 flash->cbarg = cbarg;
4406 bfa_flash_write_send(flash);
4442 if (!len || (len & 0x03) || (offset & 0x00003FFF))
4452 flash->cbarg = cbarg;
4459 bfa_flash_read_send(flash);
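/* Diagnostics: memory test, firmware ping (DMA pattern check), temperature sensor, LED test and port beacon. */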
4468 #define BFA_DIAG_MEMTEST_TOV 50000
4469 #define CT2_BFA_DIAG_MEMTEST_TOV (9*30*1000)
4517 bfa_diag_memtest_done(void *cbarg)
4523 u32 pgnum, pgoff, i;
4533 *((u32 *) res + i) =
4535 loff += sizeof(u32);
4582 for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++)
4608 u32 rsp_data = diag_rsp->data;
4612 bfa_trc(diag, rsp_dma_status);
4616 pat = (diag->fwping.count & 0x1) ? ~(diag->fwping.data) :
4619 if (diag->fwping.data != rsp_data) {
4621 diag->fwping.result->dmastatus =
4630 for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++) {
4631 if (*((u32 *)diag->fwping.dbuf_kva + i) != pat) {
4636 diag->fwping.result->dmastatus =
4661 diag_tempsensor_send(struct bfa_diag_s *diag)
4688 diag->tsensor.temp->ts_junc = rsp->ts_junc;
4689 diag->tsensor.temp->ts_brd = rsp->ts_brd;
4693 diag->tsensor.temp->status = rsp->status;
4695 diag->tsensor.temp->brd_temp =
4698 diag->tsensor.temp->brd_temp = 0;
4731 ledtest->freq = 500 / ledtest->freq;
4733 if (ledtest->freq == 0)
4775 diag_portbeacon_comp(struct bfa_diag_s *diag)
4791 switch (msg->mh.msg_id) {
4793 diag_portbeacon_comp(diag);
4851 bfa_diag_memtest_done, diag, memtest_tov);
4899 diag->
fwping.result->data = 0;
4903 diag_fwping_send(diag);
4940 diag_tempsensor_send(diag);
4969 diag_ledtest_send(diag, ledtest);
4989 bfa_trc(diag, link_e2e_beacon);
4998 if (diag->beacon.state && beacon)
5002 diag->beacon.link_e2e = link_e2e_beacon;
5007 diag_portbeacon_send(diag, beacon, sec);
5047 diag->fwping.dbuf_kva = dm_kva;
5048 diag->fwping.dbuf_pa = dm_pa;
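/* PHY flash access, transferred in chunks of at most BFA_PHY_DMA_BUF_SZ bytes per mailbox request. */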
5055 #define BFA_PHY_DMA_BUF_SZ 0x02000
5056 #define BFA_PHY_LOCK_STATUS 0x018878
5059 bfa_phy_ntoh32(u32 *obuf, u32 *ibuf, int sz)
5063 for (i = 0; i < m; i++)
5101 bfa_phy_query_send(void *cbarg)
5120 bfa_phy_write_send(void *cbarg)
5145 for (i = 0; i < sz; i++)
5160 bfa_phy_read_send(void *cbarg)
5184 bfa_phy_stats_send(void *cbarg)
5291 if (!bfa_phy_present(phy))
5307 bfa_phy_query_send(phy);
5331 if (!bfa_phy_present(phy))
5346 phy->ubuf = (u8 *) stats;
5347 bfa_phy_stats_send(phy);
5375 if (!bfa_phy_present(phy))
5382 if (!len || (len & 0x03))
5399 bfa_phy_write_send(phy);
5426 if (!bfa_phy_present(phy))
5433 if (!len || (len & 0x03))
5449 bfa_phy_read_send(phy);
5483 switch (msg->mh.msg_id) {
5530 bfa_phy_write_send(phy);
5546 int i, sz = len >> 1;
5551 for (i = 0; i < sz; i++)
5563 bfa_phy_read_send(phy);
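/* dconf: driver configuration blob persisted to flash. */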
5605 static void bfa_dconf_cbfn(void *dconf, bfa_status_t status);
5606 static void bfa_dconf_timer(void *cbarg);
5634 bfa_dconf_init_cb, dconf->bfa);
5723 bfa_dconf_flash_write(dconf);
5735 bfa_dconf_flash_write(dconf);
5849 if (cfg->drvcfg.min_cfg) {
5885 bfa_dconf_start(struct bfa_s *bfa)
5890 bfa_dconf_stop(struct bfa_s *bfa)
5894 static void bfa_dconf_timer(void *cbarg)
5900 bfa_dconf_iocdisable(struct bfa_s *bfa)
5907 bfa_dconf_detach(struct bfa_s *bfa)
5920 bfa_dconf_cbfn, dconf);
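/* FRU/VPD access via bfa_fru_write_send()/bfa_fru_read_send() mailbox requests. */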
5964 #define BFA_FRU_DMA_BUF_SZ 0x02000
5965 #define BFA_FRU_CHINOOK_MAX_SIZE 0x10000
5966 #define BFA_FRU_LIGHTNING_MAX_SIZE 0x200
6344 switch (msg->mh.msg_id) {
6358 bfa_fru_write_send(fru,
6361 bfa_fru_write_send(fru,
6392 bfa_fru_read_send(fru,
6395 bfa_fru_read_send(fru,