#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>

#define DRV_NAME	"qla3xxx"
#define DRV_STRING	"QLogic ISP3XXX Network Driver"
#define DRV_VERSION	"v2.03.00-k5"

static const char ql3xxx_driver_name[] = DRV_NAME;
static const char ql3xxx_driver_version[] = DRV_VERSION;

#define TIMED_OUT_MSG \
"Timed out waiting for management port to get free before issuing command\n"
static const u32 default_msg
    = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;

static int debug = -1;		/* defaults above */
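/*
 * Hardware semaphore helpers. The semaphore register uses a masked-write
 * scheme: the upper 16 bits of sem_mask select which bits the write may
 * change, and reading the value back (masked by sem_mask >> 16) confirms
 * whether this function actually won the semaphore.
 */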
static int ql_sem_spinlock(struct ql3_adapter *qdev,
			   u32 sem_mask, u32 sem_bits)

	writel((sem_mask | sem_bits),
	if ((value & (sem_mask >> 16)) == sem_bits)

	return ((value & (sem_mask >> 16)) == sem_bits);

static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)

	if (ql_sem_lock(qdev,
			   "driver lock acquired\n");
	netdev_err(qdev->ndev,
		   "Timed out waiting for driver lock...\n");

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	ql_set_register_page(qdev, 0);

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	ql_set_register_page(qdev, 0);
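/*
 * Register access wrappers. The chip's registers are windowed: the common
 * registers are reachable from any page, while the page0/1/2 writers call
 * ql_set_register_page() first to select the window. The _l variants
 * additionally take qdev->hw_lock around the access.
 */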
static void ql_write_common_reg_l(struct ql3_adapter *qdev,
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

static void ql_write_common_reg(struct ql3_adapter *qdev,

static void ql_write_nvram_reg(struct ql3_adapter *qdev,

static void ql_write_page0_reg(struct ql3_adapter *qdev,
	ql_set_register_page(qdev, 0);

static void ql_write_page1_reg(struct ql3_adapter *qdev,
	ql_set_register_page(qdev, 1);

static void ql_write_page2_reg(struct ql3_adapter *qdev,
	ql_set_register_page(qdev, 2);

static void ql_disable_interrupts(struct ql3_adapter *qdev)
	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,

static void ql_enable_interrupts(struct ql3_adapter *qdev)
	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
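/*
 * Large receive buffer recycling: when the stack is done with a buffer,
 * its control block is rearmed here. If the skb was consumed, a fresh one
 * is allocated and DMA-mapped before the buffer returns to the free list.
 */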
static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,

	if (!lrg_buf_cb->skb) {
		lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
			netdev_err(qdev->ndev,
				   "failed netdev_alloc_skb()\n");

		map = pci_map_single(qdev->pdev,
				     lrg_buf_cb->skb->data,
		err = pci_dma_mapping_error(qdev->pdev, map);
			netdev_err(qdev->ndev,
				   "PCI mapping failed with error: %d\n",
			dev_kfree_skb(lrg_buf_cb->skb);

	if (lrg_buf_cb != NULL) {
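/*
 * NVRAM access. Board parameters live in an FM93C56A serial EEPROM that
 * is bit-banged through the NVRAM register: the helpers below clock the
 * select, command/address and data bits in and out one at a time.
 */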
static void fm93c56a_deselect(struct ql3_adapter *qdev);
static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr,
			    unsigned short *value);

static void fm93c56a_select(struct ql3_adapter *qdev)
	ql_write_nvram_reg(qdev, spir,

	ql_write_nvram_reg(qdev, spir,
	ql_write_nvram_reg(qdev, spir,
	ql_write_nvram_reg(qdev, spir,

	previousBit = 0xffff;
		dataBit = (cmd & mask)
		if (previousBit != dataBit) {
			ql_write_nvram_reg(qdev, spir,
			previousBit = dataBit;
		ql_write_nvram_reg(qdev, spir,
		ql_write_nvram_reg(qdev, spir,

	mask = 1 << (addrBits - 1);
	previousBit = 0xffff;
	for (i = 0; i < addrBits; i++) {
		if (previousBit != dataBit) {
			ql_write_nvram_reg(qdev, spir,
			previousBit = dataBit;
		ql_write_nvram_reg(qdev, spir,
		ql_write_nvram_reg(qdev, spir,
		eepromAddr = eepromAddr << 1;

static void fm93c56a_deselect(struct ql3_adapter *qdev)

static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
	for (i = 0; i < dataBits; i++) {
		ql_write_nvram_reg(qdev, spir,
		ql_write_nvram_reg(qdev, spir,
		dataBit = (ql_read_common_reg(qdev, spir) &
		data = (data << 1) | dataBit;

static void eeprom_readword(struct ql3_adapter *qdev,
			    u32 eepromAddr, unsigned short *value)
	fm93c56a_select(qdev);
	fm93c56a_datain(qdev, value);
	fm93c56a_deselect(qdev);
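/*
 * Read the entire NVRAM image word by word, folding each word into a
 * running checksum; a correctly programmed part sums to zero, so any
 * non-zero result means the contents cannot be trusted.
 */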
static int ql_get_nvram_params(struct ql3_adapter *qdev)
		pr_err("%s: Failed ql_sem_spinlock()\n", __func__);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

		eeprom_readword(qdev, index, pEEPROMData);
		checksum += *pEEPROMData;

		netdev_err(qdev->ndev, "checksum should be zero, is %x!!\n",
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
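/*
 * MII (PHY management) helpers. The MAC can autonomously poll ("scan")
 * the PHY, so a manual read or write disables scan mode first, waits for
 * the MII interface to go idle, issues the access, and then restores
 * scanning on the way out.
 */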
static const u32 PHYAddr[2] = {

static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)

static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev)

static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
	scanWasEnabled = ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
	if (ql_wait_for_mii_ready(qdev)) {
		ql_mii_enable_scan_mode(qdev);

static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
	scanWasEnabled = ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
	if (ql_wait_for_mii_ready(qdev)) {
		ql_mii_enable_scan_mode(qdev);

	ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
	if (ql_wait_for_mii_ready(qdev)) {
	ql_mii_enable_scan_mode(qdev);

	ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
	if (ql_wait_for_mii_ready(qdev)) {
	ql_mii_enable_scan_mode(qdev);
static void ql_petbi_reset(struct ql3_adapter *qdev)

static void ql_petbi_start_neg(struct ql3_adapter *qdev)

static void ql_petbi_reset_ex(struct ql3_adapter *qdev)

static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev)

static void ql_petbi_init(struct ql3_adapter *qdev)
	ql_petbi_reset(qdev);
	ql_petbi_start_neg(qdev);

static void ql_petbi_init_ex(struct ql3_adapter *qdev)
	ql_petbi_reset_ex(qdev);
	ql_petbi_start_neg_ex(qdev);

static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev)

static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
	netdev_info(qdev->ndev, "enabling Agere specific PHY\n");
	ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr);
	ql_mii_write_reg_ex(qdev, 0x12, 0x840e, miiAddr);
	ql_mii_write_reg_ex(qdev, 0x10, 0x8805, miiAddr);
	ql_mii_write_reg_ex(qdev, 0x11, 0xf03e, miiAddr);
	ql_mii_write_reg_ex(qdev, 0x10, 0x8806, miiAddr);
	ql_mii_write_reg_ex(qdev, 0x11, 0x003e, miiAddr);
	ql_mii_write_reg_ex(qdev, 0x10, 0x8807, miiAddr);
	ql_mii_write_reg_ex(qdev, 0x11, 0x1f00, miiAddr);
	ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr);
	ql_mii_write_reg_ex(qdev, 0x11,
			    0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr);

	ql_mii_write_reg(qdev, 0x12, 0x840a);
	ql_mii_write_reg(qdev, 0x00, 0x1140);
	ql_mii_write_reg(qdev, 0x1c, 0xfaf0);
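/*
 * PHY identification: the OUI is spread across the two PHY ID registers,
 * so the reads are recombined below before being matched against the
 * PHY_DEVICES[] table; all-ones reads mean no PHY answered at that
 * address.
 */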
			       u16 phyIdReg0, u16 phyIdReg1)
	if (phyIdReg0 == 0xffff)
	if (phyIdReg1 == 0xffff)

	oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10);

		if ((oui == PHY_DEVICES[i].phyIdOUI) &&
			netdev_info(qdev->ndev, "Phy: %s\n",
				    PHY_DEVICES[i].name);
static int ql_phy_get_speed(struct ql3_adapter *qdev)
	if (ql_mii_read_reg(qdev, 0x1A, &reg) < 0)
	reg = (reg >> 8) & 3;
	reg = (((reg & 0x18) >> 3) & 3);

static int ql_is_full_dup(struct ql3_adapter *qdev)
	if (ql_mii_read_reg(qdev, 0x1A, &reg))
	return ((reg & 0x0080) && (reg & 0x1000)) != 0;
static int ql_is_phy_neg_pause(struct ql3_adapter *qdev)

	bool agereAddrChangeNeeded = false;

		netdev_err(qdev->ndev,
			   "Could not read from reg PHY_ID_0_REG\n");
		netdev_err(qdev->ndev,
			   "Could not read from reg PHY_ID_1_REG\n");

	if ((reg1 == 0xffff) || (reg2 == 0xffff)) {
		err = ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr);
			netdev_err(qdev->ndev,
				   "Could not read from reg PHY_ID_0_REG after Agere detected\n");
		err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr);
			netdev_err(qdev->ndev,
				   "Could not read from reg PHY_ID_1_REG after Agere detected\n");
		agereAddrChangeNeeded = true;

	qdev->phyType = getPhyType(qdev, reg1, reg2);
		phyAgereSpecificInit(qdev, miiAddr);
		netdev_err(qdev->ndev, "PHY is unknown\n");
static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)

static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)

static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	return (temp & bitToCheck) != 0;
static int ql_is_auto_cfg(struct ql3_adapter *qdev)
	ql_mii_read_reg(qdev, 0x00, &reg);
	return (reg & 0x1000) != 0;

static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {

static int ql_is_neg_pause(struct ql3_adapter *qdev)
	if (ql_is_fiber(qdev))
		return ql_is_petbi_neg_pause(qdev);
	return ql_is_phy_neg_pause(qdev);

static int ql_auto_neg_error(struct ql3_adapter *qdev)
	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	return (temp & bitToCheck) != 0;

	if (ql_is_fiber(qdev))
	return ql_phy_get_speed(qdev);

static int ql_is_link_full_dup(struct ql3_adapter *qdev)
	if (ql_is_fiber(qdev))
	return ql_is_full_dup(qdev);
static int ql_link_down_detect(struct ql3_adapter *qdev)
		ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
	return (temp & bitToCheck) != 0;

static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
	ql_write_common_reg(qdev,
	ql_write_common_reg(qdev,

static int ql_this_adapter_controls_port(struct ql3_adapter *qdev)
	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
			   "not link master\n");
static void ql_phy_reset_ex(struct ql3_adapter *qdev)

static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
	u16 portConfiguration;

	ql_mii_write_reg(qdev, 0x13, 0x0000);

			qdev->nvram_data.macCfg_port0.portConfiguration;
			qdev->nvram_data.macCfg_port1.portConfiguration;

	if (portConfiguration == 0)
	if (portConfiguration & PORT_CONFIG_100MB_SPEED)
	if (portConfiguration & PORT_CONFIG_1000MB_SPEED)

static void ql_phy_init_ex(struct ql3_adapter *qdev)
	ql_phy_reset_ex(qdev);
	ql_phy_start_neg_ex(qdev);
	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck)

static int ql_port_start(struct ql3_adapter *qdev)
		netdev_err(qdev->ndev, "Could not get hw lock for GIO\n");

	if (ql_is_fiber(qdev)) {
		ql_petbi_init(qdev);
		ql_phy_init_ex(qdev);
static int ql_finish_auto_neg(struct ql3_adapter *qdev)

	if (!ql_auto_neg_error(qdev)) {
			   "Configuring link\n");
		ql_mac_cfg_soft_reset(qdev, 1);
		ql_mac_cfg_gig(qdev,
		ql_mac_cfg_full_dup(qdev,
		ql_mac_cfg_pause(qdev,
		ql_mac_cfg_soft_reset(qdev, 0);

		ql_mac_enable(qdev, 1);

		netif_start_queue(qdev->ndev);
			    "Link is up at %d Mbps, %s duplex\n",
			    ql_get_link_speed(qdev),
			    ql_is_link_full_dup(qdev) ? "full" : "half");

			   "Remote error detected. Calling ql_port_start()\n");
		if (ql_port_start(qdev))
	u32 curr_link_state;

	curr_link_state = ql_get_link_state(qdev);
			   "Reset in progress, skip processing link state\n");
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	ql_port_start(qdev);

	if (curr_link_state == LS_UP) {
		if (ql_is_auto_neg_complete(qdev))
			ql_finish_auto_neg(qdev);
		ql_link_down_detect_clear(qdev);

	if (curr_link_state == LS_DOWN) {
		if (ql_link_down_detect(qdev))

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
static void ql_get_phy_owner(struct ql3_adapter *qdev)
	if (ql_this_adapter_controls_port(qdev))

static void ql_init_scan_mode(struct ql3_adapter *qdev)
	ql_mii_enable_scan_mode(qdev);

	if (ql_this_adapter_controls_port(qdev))
		ql_petbi_init_ex(qdev);
	if (ql_this_adapter_controls_port(qdev))
		ql_phy_init_ex(qdev);

	ql_write_page0_reg(qdev,

#define SUPPORTED_OPTICAL_MODES	(SUPPORTED_1000baseT_Full | \

#define SUPPORTED_TP_MODES	(SUPPORTED_10baseT_Half | \
				 SUPPORTED_10baseT_Full | \
				 SUPPORTED_100baseT_Half | \
				 SUPPORTED_100baseT_Full | \
				 SUPPORTED_1000baseT_Half | \
				 SUPPORTED_1000baseT_Full | \
				 SUPPORTED_Autoneg | \
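/*
 * ethtool query helpers. Each one briefly takes qdev->hw_lock, since
 * answering requires MII or port-status reads that must not race the
 * link state machine.
 */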
static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	status = ql_is_auto_cfg(qdev);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	status = ql_get_link_speed(qdev);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

static int ql_get_full_dup(struct ql3_adapter *qdev)
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	status = ql_is_link_full_dup(qdev);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	ecmd->supported = ql_supported_modes(qdev);
	ecmd->autoneg = ql_get_auto_cfg_status(qdev);
	ethtool_cmd_speed_set(ecmd, ql_get_speed(qdev));
	ecmd->duplex = ql_get_full_dup(qdev);
static void ql_get_drvinfo(struct net_device *ndev,

static void ql_set_msglevel(struct net_device *ndev, u32 value)

static void ql_get_pauseparam(struct net_device *ndev,
	pause->autoneg = ql_get_auto_cfg_status(qdev);

static const struct ethtool_ops ql3xxx_ethtool_ops = {
	.get_settings = ql_get_settings,
	.get_drvinfo = ql_get_drvinfo,
	.get_msglevel = ql_get_msglevel,
	.set_msglevel = ql_set_msglevel,
	.get_pauseparam = ql_get_pauseparam,
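/*
 * Walk the large-buffer free list and give every control block that lost
 * its skb a freshly allocated, DMA-mapped replacement, so that the
 * producer-index updates below only ever hand the chip usable buffers.
 */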
static int ql_populate_free_queue(struct ql3_adapter *qdev)

	while (lrg_buf_cb) {
		if (!lrg_buf_cb->skb) {
				netdev_alloc_skb(qdev->ndev,
					"Failed netdev_alloc_skb()\n");

			map = pci_map_single(qdev->pdev,
					     lrg_buf_cb->skb->data,
			err = pci_dma_mapping_error(qdev->pdev, map);
				netdev_err(qdev->ndev,
					   "PCI mapping failed with error: %d\n",
				dev_kfree_skb(lrg_buf_cb->skb);

		lrg_buf_cb = lrg_buf_cb->next;
static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev)
			    &port_regs->CommonRegs.rxSmallQProducerIndex);

static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
		if (!ql_populate_free_queue(qdev))

			for (i = 0; i < 8; i++) {
					ql_get_from_lrg_buf_free_list(qdev);

			    &port_regs->CommonRegs.rxLargeQProducerIndex);
static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
		netdev_warn(qdev->ndev,
			    "Frame too short but it was padded and sent\n");

		netdev_err(qdev->ndev,
			   "Frame too short to be legal, frame not sent\n");
		qdev->ndev->stats.tx_errors++;
		goto frame_not_sent;

		netdev_err(qdev->ndev, "tx_cb->seg_count == 0: %d\n",
		qdev->ndev->stats.tx_errors++;
		goto invalid_seg_count;

	pci_unmap_single(qdev->pdev,
	for (i = 1; i < tx_cb->seg_count; i++) {
		pci_unmap_page(qdev->pdev,

	qdev->ndev->stats.tx_packets++;
	qdev->ndev->stats.tx_bytes += tx_cb->skb->len;
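/*
 * Receive completion: each completed frame spans two large buffers, so
 * both control blocks are pulled off the queue, the skb holding the data
 * is unmapped and passed up the stack, and both buffers are recycled onto
 * the free list.
 */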
static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,

	lrg_buf_cb1 = ql_get_lbuf(qdev);
	lrg_buf_cb2 = ql_get_lbuf(qdev);
	skb = lrg_buf_cb2->skb;

	qdev->ndev->stats.rx_packets++;

	pci_unmap_single(qdev->pdev,
	skb_checksum_none_assert(skb);

	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,

	lrg_buf_cb1 = ql_get_lbuf(qdev);
	skb1 = lrg_buf_cb1->skb;
	if (*((u16 *) skb1->data) != 0xFFFF)

	lrg_buf_cb2 = ql_get_lbuf(qdev);
	skb2 = lrg_buf_cb2->skb;

	pci_unmap_single(qdev->pdev,
	skb_checksum_none_assert(skb2);
	skb_copy_from_linear_data_offset(skb1, VLAN_ID_LEN,
			   "%s: Bad checksum for this %s packet, checksum = %x\n",
			   "TCP" : "UDP"), checksum);

	ndev->stats.rx_packets++;

	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
static int ql_tx_rx_clean(struct ql3_adapter *qdev,
			  int *tx_cleaned, int *rx_cleaned, int work_to_do)

		switch (net_rsp->opcode) {
				   "Hit default case, not handled!\n"
				   " dropping the packet, opcode = %x\n"
				   "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",
				   (unsigned long int)tmp[0],
				   (unsigned long int)tmp[1],
				   (unsigned long int)tmp[2],
				   (unsigned long int)tmp[3]);
	work_done = *tx_cleaned + *rx_cleaned;

	int rx_cleaned = 0, tx_cleaned = 0;

	ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget);

	if (tx_cleaned + rx_cleaned != budget) {
		ql_update_small_bufq_prod_index(qdev);
		ql_update_lrg_bufq_prod_index(qdev);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		ql_enable_interrupts(qdev);

	return tx_cleaned + rx_cleaned;
	value = ql_read_common_reg_l(qdev,

		netif_stop_queue(qdev->ndev);
		ql_disable_interrupts(qdev);
		ql_read_page0_reg_l(qdev,
			   "Resetting chip. PortFatalErrStatus register = 0x%x\n",

			    "Another function issued a reset to the chip. ISR value = %x\n",
		ql_disable_interrupts(qdev);
static int ql_get_seg_count(struct ql3_adapter *qdev, unsigned short frags)
	else if (frags <= 6)
	else if (frags <= 10)
	else if (frags <= 14)
	else if (frags <= 18)
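/*
 * Transmit mapping. A send IOCB holds only a few buffer addresses inline;
 * longer scatter lists spill into chained outbound address lists (OALs).
 * The seg == 2/7/12/17 checks in ql_send_map() below mark the points
 * where a new OAL must be mapped, and ql_get_seg_count() above sizes the
 * request using the same boundaries.
 */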
static void ql_hw_csum_setup(const struct sk_buff *skb,
	const struct iphdr *ip = ip_hdr(skb);

	mac_iocb_ptr->ip_hdr_off = skb_network_offset(skb);
	int len = skb_headlen(skb);
	int completed_segs, i;
	int seg_cnt, seg = 0;
	int frag_cnt = (int)skb_shinfo(skb)->nr_frags;

	err = pci_dma_mapping_error(qdev->pdev, map);
		netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n",

	oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;

	for (completed_segs = 0;
	     completed_segs < frag_cnt;
	     completed_segs++, seg++) {

		if ((seg == 2 && seg_cnt > 3) ||
		    (seg == 7 && seg_cnt > 8) ||
		    (seg == 12 && seg_cnt > 13) ||
		    (seg == 17 && seg_cnt > 18)) {
			map = pci_map_single(qdev->pdev, oal,
			err = pci_dma_mapping_error(qdev->pdev, map);
				netdev_err(qdev->ndev,
					   "PCI mapping outbound address list with error: %d\n",

				 sizeof(struct oal));
			oal_entry = (struct oal_entry *)oal;

		map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
			netdev_err(qdev->ndev,
				   "PCI mapping frags failed with error: %d\n",

	oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
	for (i = 0; i < completed_segs; i++, seg++) {

		if ((seg == 2 && seg_cnt > 3) ||
		    (seg == 7 && seg_cnt > 8) ||
		    (seg == 12 && seg_cnt > 13) ||
		    (seg == 17 && seg_cnt > 18)) {
			pci_unmap_single(qdev->pdev,

		pci_unmap_page(qdev->pdev,

	pci_unmap_single(qdev->pdev,

	tx_cb->seg_count = ql_get_seg_count(qdev,
					    skb_shinfo(skb)->nr_frags);
		netdev_err(ndev, "%s: invalid segment count!\n", __func__);

		ql_hw_csum_setup(skb, mac_iocb_ptr);

	if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) {
		netdev_err(ndev, "%s: Could not map the segments!\n", __func__);

	ql_write_common_reg_l(qdev,
		   "tx queued, slot %d, len %d\n",
static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
		netdev_err(qdev->ndev, "reqQ failed\n");
		netdev_err(qdev->ndev, "rspQ allocation failed\n");

static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev)
		netdev_info(qdev->ndev, "Already done\n");
static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
		netdev_err(qdev->ndev, "qdev->lrg_buf alloc failed\n");
		netdev_err(qdev->ndev, "lBufQ failed\n");
		netdev_err(qdev->ndev, "Small Buffer Queue allocation failed\n");

static void ql_free_buffer_queues(struct ql3_adapter *qdev)
		netdev_info(qdev->ndev, "Already done\n");
static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
		netdev_err(qdev->ndev, "Failed to get small buffer memory\n");

		small_buf_q_entry++;

static void ql_free_small_buffers(struct ql3_adapter *qdev)
		netdev_info(qdev->ndev, "Already done\n");
static void ql_free_large_buffers(struct ql3_adapter *qdev)
		if (lrg_buf_cb->skb) {
			dev_kfree_skb(lrg_buf_cb->skb);
			pci_unmap_single(qdev->pdev,

static void ql_init_large_buffers(struct ql3_adapter *qdev)

static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
		skb = netdev_alloc_skb(qdev->ndev,
			netdev_err(qdev->ndev,
				   "large buff alloc failed for %d bytes at index %d\n",
			ql_free_large_buffers(qdev);

		map = pci_map_single(qdev->pdev,
		err = pci_dma_mapping_error(qdev->pdev, map);
			netdev_err(qdev->ndev,
				   "PCI mapping failed with error: %d\n",
			ql_free_large_buffers(qdev);
static void ql_free_send_free_list(struct ql3_adapter *qdev)
	tx_cb = &qdev->tx_buf[0];

static int ql_create_send_free_list(struct ql3_adapter *qdev)
static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
		netdev_err(qdev->ndev,
			   "Invalid mtu size: %d. Only %d and %d are accepted.\n",

		netdev_err(qdev->ndev, "shadowReg Alloc failed\n");

	if (ql_alloc_net_req_rsp_queues(qdev) != 0) {
		netdev_err(qdev->ndev, "ql_alloc_net_req_rsp_queues failed\n");

	if (ql_alloc_buffer_queues(qdev) != 0) {
		netdev_err(qdev->ndev, "ql_alloc_buffer_queues failed\n");
		goto err_buffer_queues;

	if (ql_alloc_small_buffers(qdev) != 0) {
		netdev_err(qdev->ndev, "ql_alloc_small_buffers failed\n");
		goto err_small_buffers;

	if (ql_alloc_large_buffers(qdev) != 0) {
		netdev_err(qdev->ndev, "ql_alloc_large_buffers failed\n");
		goto err_small_buffers;

	ql_init_large_buffers(qdev);
	if (ql_create_send_free_list(qdev))

	ql_free_send_free_list(qdev);
	ql_free_buffer_queues(qdev);
	ql_free_net_req_rsp_queues(qdev);

static void ql_free_mem_resources(struct ql3_adapter *qdev)
	ql_free_send_free_list(qdev);
	ql_free_large_buffers(qdev);
	ql_free_small_buffers(qdev);
	ql_free_buffer_queues(qdev);
	ql_free_net_req_rsp_queues(qdev);
static int ql_init_misc_registers(struct ql3_adapter *qdev)
	ql_write_page2_reg(qdev,
	ql_write_page2_reg(qdev,
	ql_write_page2_reg(qdev,
			   (qdev->nvram_data.tcpWindowThreshold25 << 16) |
	ql_write_page2_reg(qdev,
	ql_write_page2_reg(qdev,
	ql_write_page2_reg(qdev,
	ql_write_page2_reg(qdev,
			   (qdev->nvram_data.tcpHashTableBaseHi << 16) |
	ql_write_page2_reg(qdev,
	ql_write_page2_reg(qdev,
	ql_write_page2_reg(qdev,
	ql_write_page2_reg(qdev,
	ql_write_page2_reg(qdev,
static int ql_adapter_initialize(struct ql3_adapter *qdev)

	if (ql_mii_setup(qdev))

	ql_write_common_reg(qdev, spir,
	ql_write_common_reg(qdev, spir,

	ql_write_page1_reg(qdev,
	ql_write_page1_reg(qdev,
	ql_write_page1_reg(qdev,
	ql_write_page1_reg(qdev,
	ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES);
	ql_write_page1_reg(qdev,
	ql_write_page1_reg(qdev,
	ql_write_page1_reg(qdev,
	ql_write_page1_reg(qdev,
	ql_write_page1_reg(qdev,
	ql_write_page1_reg(qdev,
	ql_write_page1_reg(qdev,
	ql_write_page1_reg(qdev,
	ql_write_page1_reg(qdev,
	ql_write_page1_reg(qdev,
	ql_write_page1_reg(qdev,

	ql_write_common_reg(qdev,
			    &port_regs->CommonRegs.rxSmallQProducerIndex,
	ql_write_common_reg(qdev,
			    &port_regs->CommonRegs.rxLargeQProducerIndex,

	value = ql_read_page0_reg(qdev, &port_regs->portStatus);

	if (ql_init_misc_registers(qdev)) {

	ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value);

	value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig;

	ql_write_page0_reg(qdev,
	ql_write_page0_reg(qdev,

	ql_init_scan_mode(qdev);
	ql_get_phy_owner(qdev);

			   ((qdev->ndev->dev_addr[2] << 24)
			    | (qdev->ndev->dev_addr[3] << 16)
			    | (qdev->ndev->dev_addr[4] << 8)
			    | qdev->ndev->dev_addr[5]));
			   ((qdev->ndev->dev_addr[0] << 8)
			    | qdev->ndev->dev_addr[1]));

	ql_write_page0_reg(qdev,

		value = ql_read_page0_reg(qdev, &port_regs->portStatus);
		if (value & PORT_STATUS_IC)
		spin_unlock_irq(&qdev->hw_lock);
		spin_lock_irq(&qdev->hw_lock);

		netdev_err(qdev->ndev, "Hw Initialization timeout\n");

			    ((value << 16) | value));
			    ((value << 16) | value));
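/*
 * Chip soft reset: request the reset, poll ispControlStatus until the
 * hardware deasserts it, clear the RI (reset indication) bit by hand, and
 * fall back to a harder reset path if the first wait loop runs out.
 */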
static int ql_adapter_reset(struct ql3_adapter *qdev)

	ql_write_common_reg(qdev,
		   "Wait 10 milliseconds for reset to complete\n");

		ql_read_common_reg(qdev,
	} while ((--max_wait_time));

		ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
			   "clearing RI after reset\n");
		ql_write_common_reg(qdev,
				    ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));

	if (max_wait_time == 0) {
		ql_write_common_reg(qdev,
			value = ql_read_common_reg(qdev,
		} while ((--max_wait_time));
		if (max_wait_time == 0)
static void ql_set_mac_info(struct ql3_adapter *qdev)
		ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
	port_status = ql_read_page0_reg(qdev, &port_regs->portStatus);
			   "Invalid function number, ispControlStatus = 0x%x\n",
static void ql_display_dev_info(struct net_device *ndev)
		    "%s Adapter %d RevisionID %d found %s on PCI slot %d\n",
	netdev_info(ndev, "%s Interface\n",
	netdev_info(ndev, "Bus interface is %s %s\n",
		    ((qdev->pci_width == 64) ? "64-bit" : "32-bit"),
		    ((qdev->pci_x) ? "PCI-X" : "PCI"));
	netdev_info(ndev, "mem IO base address adjusted = 0x%p\n",
	netdev_info(ndev, "Interrupt number = %d\n", pdev->irq);
	netif_stop_queue(ndev);
	ql_disable_interrupts(qdev);
		netdev_info(qdev->ndev, "calling pci_disable_msi()\n");
	napi_disable(&qdev->napi);

	if (ql_wait_for_drvr_lock(qdev)) {
		soft_reset = ql_adapter_reset(qdev);
			netdev_err(ndev, "ql_adapter_reset(%d) FAILED!\n",
			   "Releasing driver lock via chip reset\n");
			   "Could not acquire driver lock to do reset!\n");

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	ql_free_mem_resources(qdev);
static int ql_adapter_up(struct ql3_adapter *qdev)

	if (ql_alloc_mem_resources(qdev)) {
		netdev_err(ndev, "Unable to allocate buffers\n");

	if (pci_enable_msi(qdev->pdev)) {
			    "User requested MSI, but MSI failed to initialize. Continuing without MSI.\n");
		netdev_info(ndev, "MSI Enabled...\n");

			  irq_flags, ndev->name, ndev);
			   "Failed to reserve interrupt %d - already in use\n",

	err = ql_wait_for_drvr_lock(qdev);
		err = ql_adapter_initialize(qdev);
			netdev_err(ndev, "Unable to initialize adapter\n");
		netdev_err(ndev, "Releasing driver lock\n");
		netdev_err(ndev, "Could not acquire driver lock\n");

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	napi_enable(&qdev->napi);
	ql_enable_interrupts(qdev);

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		netdev_info(ndev, "calling pci_disable_msi()\n");
static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset)
	if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) {
		netdev_err(qdev->ndev,
			   "Driver up/down cycle failed, closing device\n");

static int ql3xxx_close(struct net_device *ndev)

static int ql3xxx_open(struct net_device *ndev)
	return ql_adapter_up(qdev);
static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)

	if (netif_running(ndev))
	if (!is_valid_ether_addr(addr->sa_data))

			   ((ndev->dev_addr[2] << 24) | (ndev->dev_addr[3] << 16) |

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
static void ql3xxx_tx_timeout(struct net_device *ndev)

	netdev_err(ndev, "Resetting...\n");

	netif_stop_queue(ndev);
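/*
 * Reset worker, run from the workqueue after a fatal error or transmit
 * timeout: free any skbs stranded on the send ring (unmapping every
 * mapped segment first), reset the chip, and poll until the reset bits
 * clear before the adapter is cycled back up.
 */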
static void ql_reset_work(struct work_struct *work)
	int max_wait_time, i;

			   "Freeing lost SKB\n");
		pci_unmap_single(qdev->pdev,
		for (j = 1; j < tx_cb->seg_count; j++) {
			pci_unmap_page(qdev->pdev,
		dev_kfree_skb(tx_cb->skb);

	netdev_err(ndev, "Clearing NRI after reset\n");
	ql_write_common_reg(qdev,
			    ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));

		value = ql_read_common_reg(qdev,
			   "reset completed\n");

		if (value & ISP_CONTROL_RI) {
				   "clearing NRI after reset\n");
			ql_write_common_reg(qdev,
					    ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));

		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	} while (--max_wait_time);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	if (value & ISP_CONTROL_SR) {
		   "Timed out waiting for reset to complete\n");
	netdev_err(ndev, "Do a reset\n");
static void ql_tx_timeout_work(struct work_struct *work)

static void ql_get_board_info(struct ql3_adapter *qdev)
	value = ql_read_page0_reg_l(qdev, &port_regs->portStatus);

static void ql3xxx_timer(unsigned long ptr)

	.ndo_open = ql3xxx_open,
	.ndo_start_xmit = ql3xxx_send,
	.ndo_stop = ql3xxx_close,
	.ndo_set_mac_address = ql3xxx_set_mac_address,
	.ndo_tx_timeout = ql3xxx_tx_timeout,
static int cards_found;

		pr_err("%s cannot enable PCI device\n", pci_name(pdev));
		pr_err("%s cannot obtain PCI resources\n", pci_name(pdev));
		goto err_out_disable_pdev;
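	/*
	 * Prefer 64-bit DMA, falling back to a 32-bit mask if the
	 * platform cannot provide it; the probe only fails when neither
	 * a streaming nor a consistent mask can be set.
	 */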
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	} else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));

		pr_err("%s no usable DMA configuration\n", pci_name(pdev));
		goto err_out_free_regions;

	ndev = alloc_etherdev(sizeof(struct ql3_adapter));
		goto err_out_free_regions;
	pci_set_drvdata(pdev, ndev);

	qdev = netdev_priv(ndev);
	qdev->index = cards_found;

		pr_err("%s: cannot map device registers\n", pci_name(pdev));
		goto err_out_free_ndev;

	if (ql_get_nvram_params(qdev)) {
		pr_alert("%s: Adapter #%d, Invalid NVRAM parameters\n",
			 __func__, qdev->index);
		goto err_out_iounmap;

	ql_set_mac_info(qdev);
		ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn2.macAddress);
		ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn0.macAddress);
	ql_get_board_info(qdev);

	pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036);

		pr_err("%s: cannot register net device\n", pci_name(pdev));
		goto err_out_iounmap;

	netif_stop_queue(ndev);

	pr_alert("Driver name: %s, Version: %s\n",
	ql_display_dev_info(ndev);
err_out_free_regions:
err_out_disable_pdev:
	pci_set_drvdata(pdev, NULL);

	struct net_device *ndev = pci_get_drvdata(pdev);

	ql_disable_interrupts(qdev);

	pci_set_drvdata(pdev, NULL);
	.id_table = ql3xxx_pci_tbl,
	.probe = ql3xxx_probe,

static int __init ql3xxx_init_module(void)
	return pci_register_driver(&ql3xxx_driver);

static void __exit ql3xxx_exit(void)