#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
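
/*
 * Helpers for reaching registers that belong to the other (alternate) NIC
 * function.  These reads and writes presumably go through the chip's MPI
 * interface rather than this function's own MMIO window.
 */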
static int ql_write_other_func_reg(struct ql_adapter *qdev,

        temp = ql_read_other_func_reg(qdev, reg);
static int ql_read_other_func_serdes_reg(struct ql_adapter *qdev, u32 reg,

                        u32 *direct_ptr, u32 *indirect_ptr,
                        unsigned int direct_valid, unsigned int indirect_valid)

        status = ql_read_serdes_reg(qdev, addr, direct_ptr);

                *direct_ptr = 0xDEADBEEF;

        status = ql_read_other_func_serdes_reg(qdev, addr, indirect_ptr);

                *indirect_ptr = 0xDEADBEEF;
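
/*
 * The helper above, ql_get_both_serdes(), reads one SERDES address for both
 * the local and the alternate function, storing 0xDEADBEEF when a block is
 * powered down or the read fails.  ql_get_serdes_regs() below works out
 * which XAUI/XFI blocks are valid for each function and walks their ranges.
 */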
static int ql_get_serdes_regs(struct ql_adapter *qdev,

        unsigned int xfi_direct_valid, xfi_indirect_valid, xaui_direct_valid;
        unsigned int xaui_indirect_valid, i;

        xfi_direct_valid = xfi_indirect_valid = 0;
        xaui_direct_valid = xaui_indirect_valid = 1;

        if (qdev->func & 1) {
                status = ql_read_other_func_serdes_reg(qdev,

                                XG_SERDES_ADDR_XAUI_PWR_DOWN)
                        xaui_indirect_valid = 0;
        status = ql_read_serdes_reg(qdev,

        if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
                        XG_SERDES_ADDR_XAUI_PWR_DOWN)
                xaui_direct_valid = 0;

        status = ql_read_other_func_serdes_reg(qdev,

        if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
                        XG_SERDES_ADDR_XAUI_PWR_DOWN)
                xaui_indirect_valid = 0;

        status = ql_read_serdes_reg(qdev,

        if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
                        XG_SERDES_ADDR_XAUI_PWR_DOWN)
                xaui_direct_valid = 0;
                        XG_SERDES_ADDR_XFI1_PWR_UP) {

                        xfi_indirect_valid = 1;

                        xfi_direct_valid = 1;

                        XG_SERDES_ADDR_XFI2_PWR_UP) {

                        xfi_direct_valid = 1;

                        xfi_indirect_valid = 1;
        if (qdev->func & 1) {

        for (i = 0; i <= 0x000000034; i += 4, direct_ptr++, indirect_ptr++)
                ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
                                   xaui_direct_valid, xaui_indirect_valid);

        if (qdev->func & 1) {

        for (i = 0x800; i <= 0x880; i += 4, direct_ptr++, indirect_ptr++)
                ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
                                   xaui_direct_valid, xaui_indirect_valid);

        if (qdev->func & 1) {

        for (i = 0x1000; i <= 0x1034; i += 4, direct_ptr++, indirect_ptr++)
                ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
                                   xfi_direct_valid, xfi_indirect_valid);

        if (qdev->func & 1) {

        for (i = 0x1050; i <= 0x107c; i += 4, direct_ptr++, indirect_ptr++)
                ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
                                   xfi_direct_valid, xfi_indirect_valid);

        if (qdev->func & 1) {

        for (i = 0x1800; i <= 0x1838; i += 4, direct_ptr++, indirect_ptr++)
                ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
                                   xfi_direct_valid, xfi_indirect_valid);

        if (qdev->func & 1) {

        for (i = 0x1c00; i <= 0x1c1f; i++, direct_ptr++, indirect_ptr++)
                ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
                                   xfi_direct_valid, xfi_indirect_valid);

        if (qdev->func & 1) {

        for (i = 0x1c40; i <= 0x1c5f; i++, direct_ptr++, indirect_ptr++)
                ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
                                   xfi_direct_valid, xfi_indirect_valid);

        if (qdev->func & 1) {

        for (i = 0x1e00; i <= 0x1e1f; i++, direct_ptr++, indirect_ptr++)
                ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
                                   xfi_direct_valid, xfi_indirect_valid);
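
/*
 * Indirect XGMAC access on the alternate function: wait for the address
 * register to go ready, write the address, wait again, then pick up the
 * data word from XGMAC_DATA.  ql_get_xgmac_regs() further below skips
 * address ranges that do not respond to reads.
 */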
static int ql_read_other_func_xgmac_reg(struct ql_adapter *qdev, u32 reg,

        status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,

        status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,

        *data = ql_read_other_func_reg(qdev, XGMAC_DATA / 4);

                             unsigned int other_function)
                if ((i == 0x00000114) ||

                    (i > 0x00000150 && i < 0x000001fc) ||
                    (i > 0x00000278 && i < 0x000002a0) ||
                    (i > 0x000002c0 && i < 0x000002cf) ||
                    (i > 0x000002dc && i < 0x000002f0) ||
                    (i > 0x000003c8 && i < 0x00000400) ||
                    (i > 0x00000400 && i < 0x00000410) ||
                    (i > 0x00000410 && i < 0x00000420) ||
                    (i > 0x00000420 && i < 0x00000430) ||
                    (i > 0x00000430 && i < 0x00000440) ||
                    (i > 0x00000440 && i < 0x00000450) ||
                    (i > 0x00000450 && i < 0x00000500) ||
                    (i > 0x0000054c && i < 0x00000568) ||
                    (i > 0x000005c8 && i < 0x00000600)) {

                        ql_read_other_func_xgmac_reg(qdev, i, buf);
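
/*
 * ETS register snapshot: select each of the 8 NIC_ETS entries and the 2
 * CNA_ETS entries in turn (the 0x08000000 bit is presumably a read strobe
 * for the selected index) and read the value back into the buffer.
 */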
static int ql_get_ets_regs(struct ql_adapter *qdev, u32 *buf)

        for (i = 0; i < 8; i++, buf++) {
                ql_write32(qdev, NIC_ETS, i << 29 | 0x08000000);
                *buf = ql_read32(qdev, NIC_ETS);

        for (i = 0; i < 2; i++, buf++) {
                ql_write32(qdev, CNA_ETS, i << 29 | 0x08000000);
                *buf = ql_read32(qdev, CNA_ETS);
static void ql_get_intr_states(struct ql_adapter *qdev, u32 *buf)

                *buf = ql_read32(qdev, INTR_EN);
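
/*
 * MAC address CAM snapshot: 16 CAM entries followed by 32 multicast
 * entries, each read back through the MAC address index register.
 */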
static int ql_get_cam_entries(struct ql_adapter *qdev, u32 *buf)

        for (i = 0; i < 16; i++) {

                                  "Failed read of mac index register\n");

        for (i = 0; i < 32; i++) {

                                  "Failed read of mac index register\n");
static int ql_get_routing_entries(struct ql_adapter *qdev, u32 *buf)

        for (i = 0; i < 16; i++) {

                                  "Failed read of routing index register\n");
static int ql_get_mpi_shadow_regs(struct ql_adapter *qdev, u32 *buf)
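
/*
 * Read a block of MPI processor registers into buf; the coredump code
 * below calls this once per register group (core, RMII, FCMAC, mailboxes,
 * SMBus, I2C, memory controller, PCI bus, MDE).  The count is a parameter;
 * the starting offset is presumably passed in as well.
 */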
static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 *buf,

        for (i = 0; i < count; i++, buf++) {
        u32 module, mux_sel, probe, lo_val, hi_val;

        if (!((valid >> module) & 1))
static int ql_get_probe_dump(struct ql_adapter *qdev, unsigned int *buf)
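
/*
 * Routing index dump: for each of the four routing types, walk every index,
 * write the selector to RT_IDX and read back both RT_IDX and RT_DATA.
 */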
static int ql_get_routing_index_registers(struct ql_adapter *qdev, u32 *buf)

        for (type = 0; type < 4; type++) {

                for (index = 0; index < index_max; index++) {

                        ql_write32(qdev, RT_IDX, val);

                        result_index = ql_read32(qdev, RT_IDX);
                        result_data = ql_read32(qdev, RT_DATA);
static void ql_get_mac_protocol_registers(struct ql_adapter *qdev, u32 *buf)

        u32 result_index, result_data;

                pr_err("Bad type!!! 0x%08x\n", type);

        for (index = 0; index < max_index; index++) {
                for (offset = 0; offset < max_offset; offset++) {

                        result_index = ql_read32(qdev,
static void ql_get_sem_registers(struct ql_adapter *qdev, u32 *buf)
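
/*
 * Every block in the coredump is prefixed with a small segment header
 * carrying the segment number, the segment length and a short text
 * description; the call sites below fill one in per register group.
 */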
static void ql_build_coredump_seg_header(

        seg_hdr->segNum = seg_number;
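
/*
 * The fragments below come from the full MPI coredump path: pause the RISC,
 * build a segment header for each register block, gather the blocks (NIC,
 * XGMAC, SERDES, MPI core, mailboxes, CAM, routing, ETS, probe and
 * semaphore registers), then unpause and finally reset the RISC.
 */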
756 "Failed RISC pause. Status = 0x%.08x\n", status);
775 sizeof(mpi_coredump->
nic_regs),
"NIC1 Registers");
780 sizeof(mpi_coredump->
nic2_regs),
"NIC2 Registers");
786 sizeof(mpi_coredump->
xgmac1),
"NIC1 XGMac Registers");
791 sizeof(mpi_coredump->
xgmac2),
"NIC2 XGMac Registers");
        if (qdev->func & 1) {

                                ql_read32(qdev, i * sizeof(u32));

                                ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);

                ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 0);
                ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 1);

                                ql_read32(qdev, i * sizeof(u32));

                                ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);

                ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 0);
                ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 1);
        ql_build_coredump_seg_header(&mpi_coredump->xaui_an_hdr,

                                     "XAUI AN Registers");

                                     "XAUI HSS PCS Registers");

                                     "XFI TRAIN Registers");

                                     "XFI HSS PCS Registers");

                                     "XFI HSS TX Registers");

                                     "XFI HSS RX Registers");

                                     "XFI HSS PLL Registers");

        ql_build_coredump_seg_header(&mpi_coredump->xaui2_an_hdr,

                                     "XAUI2 AN Registers");

                                     "XAUI2 HSS PCS Registers");

        ql_build_coredump_seg_header(&mpi_coredump->xfi2_an_hdr,

                                     "XFI2 AN Registers");

                                     "XFI2 TRAIN Registers");

                                     "XFI2 HSS PCS Registers");

                                     "XFI2 HSS TX Registers");

                                     "XFI2 HSS RX Registers");

                                     "XFI2 HSS PLL Registers");
        status = ql_get_serdes_regs(qdev, mpi_coredump);

                          "Failed Dump of Serdes Registers. Status = 0x%.08x\n",
        status = ql_get_mpi_regs(qdev, &mpi_coredump->mpi_core_regs[0],

        status = ql_get_mpi_shadow_regs(qdev,

        status = ql_get_mpi_regs(qdev, &mpi_coredump->rmii_regs[0],

        status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac1_regs[0],

        status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac2_regs[0],

        status = ql_get_mpi_regs(qdev, &mpi_coredump->fc1_mbx_regs[0],

        status = ql_get_mpi_regs(qdev, &mpi_coredump->ide_regs[0],

        status = ql_get_mpi_regs(qdev, &mpi_coredump->nic1_mbx_regs[0],

        status = ql_get_mpi_regs(qdev, &mpi_coredump->smbus_regs[0],

        status = ql_get_mpi_regs(qdev, &mpi_coredump->fc2_mbx_regs[0],

        status = ql_get_mpi_regs(qdev, &mpi_coredump->nic2_mbx_regs[0],

        status = ql_get_mpi_regs(qdev, &mpi_coredump->i2c_regs[0],

        status = ql_get_mpi_regs(qdev, &mpi_coredump->memc_regs[0],

        status = ql_get_mpi_regs(qdev, &mpi_coredump->pbus_regs[0],

        status = ql_get_mpi_regs(qdev, &mpi_coredump->mde_regs[0],
        ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);

        status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);

        status = ql_get_routing_entries(qdev,

        ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,

                                     + sizeof(mpi_coredump->ets),

        status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);

        ql_get_probe_dump(qdev, &mpi_coredump->probe_dump[0]);

        status = ql_get_routing_index_registers(qdev,

        ql_get_mac_protocol_registers(qdev, &mpi_coredump->mac_prot_regs[0]);

                                     sizeof(mpi_coredump->sem_regs),
                                     "Sem Registers");

        ql_get_sem_registers(qdev, &mpi_coredump->sem_regs[0]);
1184 "Failed RISC unpause. Status = 0x%.08x\n", status);
1192 "Failed RISC reset. Status = 0x%.08x\n", status);
1205 "Failed Dump of CODE RAM. Status = 0x%.08x\n",
1220 "Failed Dump of MEMC RAM. Status = 0x%.08x\n",
static void ql_get_core_dump(struct ql_adapter *qdev)

        if (!netif_running(qdev->ndev)) {

                          "Force Coredump can only be done from interface that is up\n");
        for (i = 0; i < 64; i++)
                mpi_coredump->nic_regs[i] = ql_read32(qdev, i * sizeof(u32));

        ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);

        status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);

        status = ql_get_routing_entries(qdev,

        ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,

                                     + sizeof(mpi_coredump->ets),

        status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
        ql_get_core_dump(qdev);
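
/*
 * The fragments below print the saved coredump image to the message log,
 * eight 32-bit words per line (presumably the core-to-log worker).
 */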
1355 "Core is dumping to log file!\n");
1357 for (i = 0; i <
count; i += 8) {
1358 pr_err(
"%.08x: %.08x %.08x %.08x %.08x %.08x "
1359 "%.08x %.08x %.08x\n", i,
static void ql_dump_intr_states(struct ql_adapter *qdev)

                value = ql_read32(qdev, INTR_EN);
                pr_err("%s: Interrupt %d is %s\n",
                       qdev->ndev->name, i,
                       (value & INTR_EN_EN ? "enabled" : "disabled"));
#define DUMP_XGMAC(qdev, reg)						\
do {									\
	u32 data;							\
	ql_read_xgmac_reg(qdev, reg, &data);				\
	pr_err("%s: %s = 0x%.08x\n", qdev->ndev->name, #reg, data);	\
} while (0)
void ql_dump_xgmac_control_regs(struct ql_adapter *qdev)

                pr_err("%s: Couldn't get xgmac sem\n", __func__);

        DUMP_XGMAC(qdev, TX_CFG);
        DUMP_XGMAC(qdev, RX_CFG);
static void ql_dump_ets_regs(struct ql_adapter *qdev)

static void ql_dump_cam_entries(struct ql_adapter *qdev)
        for (i = 0; i < 4; i++) {

                        pr_err("%s: Failed read of mac index register\n",

                        pr_err("%s: CAM index %d CAM Lookup Lower = 0x%.08x:%.08x, Output = 0x%.08x\n",
                               qdev->ndev->name, i, value[1], value[0],

        for (i = 0; i < 32; i++) {

                        pr_err("%s: Failed read of mac index register\n",

                        pr_err("%s: MCAST index %d CAM Lookup Lower = 0x%.08x:%.08x\n",
                               qdev->ndev->name, i, value[1], value[0]);
void ql_dump_routing_entries(struct ql_adapter *qdev)

        for (i = 0; i < 16; i++) {

                        pr_err("%s: Failed read of routing index register\n",

                        pr_err("%s: Routing Mask %d = 0x%.08x\n",
                               qdev->ndev->name, i, value);
#define DUMP_REG(qdev, reg) \
	pr_err("%-32s= 0x%x\n", #reg, ql_read32(qdev, reg))
        pr_err("reg dump for function #%d\n", qdev->func);
        ql_dump_intr_states(qdev);
        ql_dump_xgmac_control_regs(qdev);
        ql_dump_ets_regs(qdev);
        ql_dump_cam_entries(qdev);
        ql_dump_routing_entries(qdev);
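
/* DUMP_STAT() prints a single counter out of qdev->nic_stats by name. */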
#define DUMP_STAT(qdev, stat) \
	pr_err("%s = %ld\n", #stat, (unsigned long)qdev->nic_stats.stat)
        pr_err("%s: Enter\n", __func__);

        DUMP_STAT(qdev, tx_pkts);

        DUMP_STAT(qdev, tx_mcast_pkts);
        DUMP_STAT(qdev, tx_bcast_pkts);
        DUMP_STAT(qdev, tx_ucast_pkts);
        DUMP_STAT(qdev, tx_ctl_pkts);
        DUMP_STAT(qdev, tx_pause_pkts);
        DUMP_STAT(qdev, tx_64_pkt);
        DUMP_STAT(qdev, tx_65_to_127_pkt);
        DUMP_STAT(qdev, tx_128_to_255_pkt);
        DUMP_STAT(qdev, tx_256_511_pkt);
        DUMP_STAT(qdev, tx_512_to_1023_pkt);
        DUMP_STAT(qdev, tx_1024_to_1518_pkt);
        DUMP_STAT(qdev, tx_1519_to_max_pkt);
        DUMP_STAT(qdev, tx_undersize_pkt);
        DUMP_STAT(qdev, tx_oversize_pkt);

        DUMP_STAT(qdev, rx_bytes_ok);
        DUMP_STAT(qdev, rx_pkts);
        DUMP_STAT(qdev, rx_pkts_ok);
        DUMP_STAT(qdev, rx_bcast_pkts);
        DUMP_STAT(qdev, rx_mcast_pkts);
        DUMP_STAT(qdev, rx_ucast_pkts);
        DUMP_STAT(qdev, rx_undersize_pkts);
        DUMP_STAT(qdev, rx_oversize_pkts);
        DUMP_STAT(qdev, rx_jabber_pkts);
        DUMP_STAT(qdev, rx_undersize_fcerr_pkts);
        DUMP_STAT(qdev, rx_drop_events);
        DUMP_STAT(qdev, rx_fcerr_pkts);
        DUMP_STAT(qdev, rx_align_err);
        DUMP_STAT(qdev, rx_symbol_err);
        DUMP_STAT(qdev, rx_mac_err);
        DUMP_STAT(qdev, rx_ctl_pkts);
        DUMP_STAT(qdev, rx_pause_pkts);
        DUMP_STAT(qdev, rx_64_pkts);
        DUMP_STAT(qdev, rx_65_to_127_pkts);
        DUMP_STAT(qdev, rx_128_255_pkts);
        DUMP_STAT(qdev, rx_256_511_pkts);
        DUMP_STAT(qdev, rx_512_to_1023_pkts);
        DUMP_STAT(qdev, rx_1024_to_1518_pkts);
        DUMP_STAT(qdev, rx_1519_to_max_pkts);
        DUMP_STAT(qdev, rx_len_err_pkts);
#define DUMP_QDEV_FIELD(qdev, type, field) \
	pr_err("qdev->%-24s = " type "\n", #field, qdev->field)
#define DUMP_QDEV_DMA_FIELD(qdev, field) \
	pr_err("qdev->%-24s = %llx\n", #field, (unsigned long long)qdev->field)
#define DUMP_QDEV_ARRAY(qdev, type, array, index, field) \
	pr_err("%s[%d].%s = " type "\n", \
	       #array, index, #field, qdev->array[index].field);
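
/*
 * The field dump below walks the adapter structure itself: flags, PCI and
 * netdev pointers, doorbell and shadow-register areas, MSI-X vectors,
 * interrupt contexts and the ring bookkeeping.
 */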
        DUMP_QDEV_FIELD(qdev, "%lx", flags);
        DUMP_QDEV_FIELD(qdev, "%p", vlgrp);
        DUMP_QDEV_FIELD(qdev, "%p", pdev);
        DUMP_QDEV_FIELD(qdev, "%p", ndev);
        DUMP_QDEV_FIELD(qdev, "%d", chip_rev_id);
        DUMP_QDEV_FIELD(qdev, "%p", reg_base);
        DUMP_QDEV_FIELD(qdev, "%p", doorbell_area);
        DUMP_QDEV_FIELD(qdev, "%d", doorbell_area_size);

        DUMP_QDEV_FIELD(qdev, "%p", rx_ring_shadow_reg_area);
        DUMP_QDEV_DMA_FIELD(qdev, rx_ring_shadow_reg_dma);
        DUMP_QDEV_FIELD(qdev, "%p", tx_ring_shadow_reg_area);
        DUMP_QDEV_DMA_FIELD(qdev, tx_ring_shadow_reg_dma);
        DUMP_QDEV_FIELD(qdev, "%d", intr_count);

        DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, vector);
        DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, entry);

        DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_en_mask);
        DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_dis_mask);
        DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_read_mask);

        DUMP_QDEV_FIELD(qdev, "%d", tx_ring_count);
        DUMP_QDEV_FIELD(qdev, "%d", rx_ring_count);
        DUMP_QDEV_FIELD(qdev, "%d", ring_mem_size);
        DUMP_QDEV_FIELD(qdev, "%p", ring_mem);
        DUMP_QDEV_FIELD(qdev, "%d", intr_count);
        DUMP_QDEV_FIELD(qdev, "%p", tx_ring);
        DUMP_QDEV_FIELD(qdev, "%d", rss_ring_count);
        DUMP_QDEV_FIELD(qdev, "%p", rx_ring);
        DUMP_QDEV_FIELD(qdev, "%d", default_rx_queue);
        DUMP_QDEV_FIELD(qdev, "0x%08x", xg_sem_mask);
        DUMP_QDEV_FIELD(qdev, "0x%08x", port_link_up);
        DUMP_QDEV_FIELD(qdev, "0x%08x", port_init);
        pr_err("Dumping wqicb stuff...\n");

        pr_err("wqicb->cq_id_rss = %d\n",

        pr_err("wqicb->wq_addr = 0x%llx\n",

        pr_err("wqicb->wq_cnsmr_idx_addr = 0x%llx\n",

        if (tx_ring == NULL)

        pr_err("===================== Dumping tx_ring %d ===============\n",

        pr_err("tx_ring->base_dma = 0x%llx\n",

        pr_err("tx_ring->cnsmr_idx_sh_reg, addr = 0x%p, value = %d\n",

        pr_err("tx_ring->q = %p\n", tx_ring->q);
void ql_dump_ricb(struct ricb *ricb)

        pr_err("===================== Dumping ricb ===============\n");
        pr_err("Dumping ricb stuff...\n");

        pr_err("ricb->flags = %s%s%s%s%s%s%s%s%s\n",

        for (i = 0; i < 16; i++)
                pr_err("ricb->hash_cq_id[%d] = 0x%.08x\n", i,

        for (i = 0; i < 10; i++)
                pr_err("ricb->ipv6_hash_key[%d] = 0x%.08x\n", i,

        for (i = 0; i < 4; i++)
                pr_err("ricb->ipv4_hash_key[%d] = 0x%.08x\n", i,
        pr_err("Dumping cqicb stuff...\n");

        pr_err("cqicb->addr = 0x%llx\n",

        pr_err("cqicb->prod_idx_addr = 0x%llx\n",

        pr_err("cqicb->pkt_delay = 0x%.04x\n",

        pr_err("cqicb->irq_delay = 0x%.04x\n",

        pr_err("cqicb->lbq_addr = 0x%llx\n",

        pr_err("cqicb->lbq_buf_size = 0x%.04x\n",

        pr_err("cqicb->lbq_len = 0x%.04x\n",

        pr_err("cqicb->sbq_addr = 0x%llx\n",

        pr_err("cqicb->sbq_buf_size = 0x%.04x\n",

        pr_err("cqicb->sbq_len = 0x%.04x\n",
        if (rx_ring == NULL)

        pr_err("===================== Dumping rx_ring %d ===============\n",

        pr_err("Dumping rx_ring %d, type = %s%s%s\n",

               rx_ring->type == TX_Q ? "OUTBOUND COMPLETIONS" : "",
               rx_ring->type == RX_Q ? "INBOUND_COMPLETIONS" : "");
        pr_err("rx_ring->cq_base_dma = %llx\n",

        pr_err("rx_ring->prod_idx_sh_reg, addr = 0x%p, value = %d\n",

        pr_err("rx_ring->prod_idx_sh_reg_dma = %llx\n",

        pr_err("rx_ring->cnsmr_idx_db_reg = %p\n",

        pr_err("rx_ring->lbq_base_dma = %llx\n",

        pr_err("rx_ring->lbq_base_indirect = %p\n",

        pr_err("rx_ring->lbq_base_indirect_dma = %llx\n",

        pr_err("rx_ring->lbq = %p\n", rx_ring->lbq);
        pr_err("rx_ring->lbq_prod_idx_db_reg = %p\n",

        pr_err("rx_ring->sbq_base_dma = %llx\n",

        pr_err("rx_ring->sbq_base_indirect = %p\n",

        pr_err("rx_ring->sbq_base_indirect_dma = %llx\n",

        pr_err("rx_ring->sbq = %p\n", rx_ring->sbq);

        pr_err("rx_ring->sbq_prod_idx_db_reg addr = %p\n",

        pr_err("rx_ring->irq = %d\n", rx_ring->irq);
        pr_err("rx_ring->cpu = %d\n", rx_ring->cpu);
        pr_err("rx_ring->qdev = %p\n", rx_ring->qdev);
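
/*
 * Control-block dump: pull a control block back out of the chip and hand
 * it to the matching dump routine (wqicb, cqicb or ricb) based on the bit
 * argument.
 */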
        pr_err("%s: Enter\n", __func__);

                pr_err("%s: Failed to upload control block!\n", __func__);

                ql_dump_wqicb((struct wqicb *)ptr);

                ql_dump_cqicb((struct cqicb *)ptr);

                ql_dump_ricb((struct ricb *)ptr);

                pr_err("%s: Invalid bit value = %x\n", __func__, bit);
        pr_err("tbd->addr = 0x%llx\n",

        pr_err("tbd->len = %d\n",

        pr_err("tbd->flags = %s %s\n",

        pr_err("tbd->addr = 0x%llx\n",

        pr_err("tbd->len = %d\n",

        pr_err("tbd->flags = %s %s\n",

        pr_err("tbd->addr = 0x%llx\n",

        pr_err("tbd->len = %d\n",

        pr_err("tbd->flags = %s %s\n",
        pr_err("%s\n", __func__);

        pr_err("flags1 = %s %s %s %s %s\n",

        pr_err("flags2 = %s %s %s\n",

        pr_err("flags3 = %s %s %s\n",

        pr_err("frame_len = %d\n",

        pr_err("prot_hdr_len = %d\n",

        pr_err("hdr_offset = 0x%.04x\n",

        pr_err("frame_len = %d\n",

        tbd = &ob_mac_iocb->tbd[0];
        ql_dump_tx_desc(tbd);
        pr_err("%s\n", __func__);

        pr_err("flags = %s %s %s %s %s %s %s\n",
        pr_err("%s\n", __func__);

        pr_err("flags1 = %s%s%s%s%s%s\n",

        pr_err("%s%s%s Multicast\n",

        pr_err("flags2 = %s%s%s%s%s\n",

        pr_err("%s%s%s%s%s error\n",

        pr_err("flags3 = %s%s\n",

        pr_err("RSS flags = %s%s%s%s\n",

        pr_err("data_len = %d\n",

        pr_err("data_addr = 0x%llx\n",

        pr_err("flags4 = %s%s%s\n",

        pr_err("hdr length = %d\n",

        pr_err("hdr addr = 0x%llx\n",