#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/prefetch.h>

#include <asm/byteorder.h>
#include <asm/idprom.h>
#define tg3_flag(tp, flag)	\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)	\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
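/* The tg3_flag helpers wrap bitmap test/set/clear operations on the
 * tp->tg3_flags bitmap, so feature checks elsewhere in this file read as
 * tg3_flag(tp, JUMBO_CAPABLE) rather than open-coded bit arithmetic.
 */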
#define DRV_MODULE_NAME		"tg3"
#define TG3_MIN_NUM		125
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"September 26, 2012"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100
#define TG3_TX_TIMEOUT			(5 * HZ)

#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
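/* TG3_TX_RING_SIZE is a power of two, so NEXT_TX() can wrap the producer
 * index with a mask instead of a modulo; e.g. NEXT_TX(511) == 0.
 */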
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif
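/* Frames below the copy threshold are copied into a fresh skb in tg3_rx()
 * so the original DMA buffer can be recycled in place, which is cheaper
 * than remapping a new buffer for every small frame.
 */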
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN	2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static int tg3_debug = -1;
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_flow_control" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
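/* TG3_NUM_STATS sizes both the string table above and the statistics array
 * reported through ethtool -S; the two must stay in sync.
 */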
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	{ "nvram test (online) " },
	{ "link test (online) " },
	{ "register test (offline)" },
	{ "memory test (offline)" },
	{ "mac loopback test (offline)" },
	{ "phy loopback test (offline)" },
	{ "ext loopback test (offline)" },
	{ "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)

static u32 tg3_read32(struct tg3 *tp, u32 off)

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)

static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
	tg3_write32(tp, off, val);
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
	if (tg3_flag(tp, MBOX_WRITE_REORDER))

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
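/* All register and mailbox traffic funnels through these function pointers,
 * which are selected at probe time: direct MMIO accessors on healthy chips,
 * or indirect config-space accessors where chip bugs (e.g. mailbox write
 * reordering) require a workaround.
 */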
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {

static void tg3_ape_lock_init(struct tg3 *tp)
		tg3_ape_write32(tp, regbase + 4 * i, bit);
static int tg3_ape_lock(struct tg3 *tp, int locknum)
	tg3_ape_write32(tp, req + off, bit);
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		tg3_ape_write32(tp, gnt + off, bit);

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
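/* The APE (Application Processing Engine) shares the NIC with the host, so
 * shared resources are arbitrated through request/grant registers: the host
 * posts its bit to the request register, polls the grant register until the
 * bit appears, and releases the lock by writing the bit back to the grant
 * register.
 */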
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;

	return timeout_us ? 0 : -EBUSY;

static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
	for (i = 0; i < timeout_us / 10; i++) {

	return i == timeout_us / 10;
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
	msgoff = bufoff + 2 * sizeof(u32);
		length = (len > maxlen) ? maxlen : len;
		if (!(apedata & APE_FW_STATUS_READY))
		err = tg3_ape_event_lock(tp, 1000);
		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);
		if (tg3_ape_wait_for_event(tp, 30000))
		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
static int tg3_ape_send_event(struct tg3 *tp, u32 event)
	if (!(apedata & APE_FW_STATUS_READY))
	err = tg3_ape_event_lock(tp, 1000);

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
		if (device_may_wakeup(&tp->pdev->dev) &&
	tg3_ape_send_event(tp, event);

static void tg3_disable_ints(struct tg3 *tp)
	for (i = 0; i < tp->irq_max; i++)

static void tg3_enable_ints(struct tg3 *tp)
	for (i = 0; i < tp->irq_cnt; i++) {
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
	struct tg3 *tp = tnapi->tp;
	unsigned int work_exists = 0;
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)

static void tg3_int_reenable(struct tg3_napi *tnapi)
	struct tg3 *tp = tnapi->tp;
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))

static void tg3_switch_clocks(struct tg3 *tp)
	u32 orig_clock_ctrl;
	orig_clock_ctrl = clock_ctrl;
		    clock_ctrl | CLOCK_CTRL_625_CORE, 40);

#define PHY_BUSY_LOOPS	5000
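/* tg3_readphy()/tg3_writephy() below poll the MI communication register for
 * completion; PHY_BUSY_LOOPS bounds that busy-wait so a hung PHY cannot
 * stall the driver indefinitely.
 */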
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
	while (loops != 0) {

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
	while (loops != 0) {

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB);
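/* These two macros bracket the DSP register sequences used throughout this
 * file: ENABLE turns on MII_TG3_AUXCTL_ACTL_SMDSP_ENA, DISABLE writes the
 * auxctl register back without it.
 */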
static int tg3_bmcr_reset(struct tg3 *tp)
	err = tg3_writephy(tp, MII_BMCR, phy_control);
		err = tg3_readphy(tp, MII_BMCR, &phy_control);

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
	spin_lock_bh(&tp->lock);
	if (tg3_readphy(tp, reg, &val))
	spin_unlock_bh(&tp->lock);

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
	spin_lock_bh(&tp->lock);
	if (tg3_writephy(tp, reg, val))
	spin_unlock_bh(&tp->lock);

static int tg3_mdio_reset(struct mii_bus *bp)
static void tg3_mdio_config_5785(struct tg3 *tp)
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
		if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
			if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
		if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
			if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))

static void tg3_mdio_start(struct tg3 *tp)
	if (tg3_flag(tp, MDIOBUS_INITED) &&
		tg3_mdio_config_5785(tp);
static int tg3_mdio_init(struct tg3 *tp)
	tp->mdio_bus->name = "tg3 mdio bus";
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->read = &tg3_mdio_read;
	tp->mdio_bus->write = &tg3_mdio_write;
	tp->mdio_bus->reset = &tg3_mdio_reset;
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
	if (!phydev || !phydev->drv) {
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
		tg3_mdio_config_5785(tp);

static void tg3_mdio_fini(struct tg3 *tp)
	if (tg3_flag(tp, MDIOBUS_INITED)) {

static inline void tg3_generate_fw_event(struct tg3 *tp)

#define TG3_FW_EVENT_TIMEOUT_USEC	2500
static void tg3_wait_for_event_ack(struct tg3 *tp)
	unsigned int delay_cnt;
	if (time_remain < 0)
	delay_cnt = (delay_cnt >> 3) + 1;
	for (i = 0; i < delay_cnt; i++) {

static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
	if (!tg3_readphy(tp, MII_BMCR, &reg))
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
		val |= (reg & 0xffff);
static void tg3_ump_link_report(struct tg3 *tp)
	tg3_phy_gather_ump_data(tp, data);
	tg3_wait_for_event_ack(tp);
	tg3_generate_fw_event(tp);

static void tg3_stop_fw(struct tg3 *tp)
	tg3_wait_for_event_ack(tp);
	tg3_generate_fw_event(tp);
	tg3_wait_for_event_ack(tp);
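/* Driver-to-firmware events follow a simple handshake: wait until any
 * previous event has been acknowledged, post the new event, and (in
 * tg3_stop_fw) wait once more so the firmware has consumed the event
 * before the chip is reset.
 */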
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
	tg3_ape_driver_state_change(tp, kind);

static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
	tg3_ape_driver_state_change(tp, kind);

static void tg3_write_sig_legacy(struct tg3 *tp, int kind)

static int tg3_poll_fw(struct tg3 *tp)
		for (i = 0; i < 200; i++) {
	for (i = 0; i < 100000; i++) {
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		netdev_info(tp->dev, "No firmware running\n");
static void tg3_link_report(struct tg3 *tp)
	if (!netif_carrier_ok(tp->dev)) {
		tg3_ump_link_report(tp);
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");
		tg3_ump_link_report(tp);

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
	else if (flow_ctrl & FLOW_CTRL_TX)
	else if (flow_ctrl & FLOW_CTRL_RX)

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
	if (lcladv & ADVERTISE_1000XPAUSE)
	if (rmtadv & ADVERTISE_1000XPAUSE)

static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
		flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	if (flowctrl & FLOW_CTRL_RX)
	if (old_rx_mode != tp->rx_mode)
	if (flowctrl & FLOW_CTRL_TX)
	if (old_tx_mode != tp->tx_mode)
static void tg3_adjust_link(struct net_device *dev)
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	spin_lock_bh(&tp->lock);
			lcl_adv = mii_advertise_flowctrl(
			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	spin_unlock_bh(&tp->lock);
		tg3_link_report(tp);
static int tg3_phy_init(struct tg3 *tp)
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);

static void tg3_phy_start(struct tg3 *tp)

static void tg3_phy_stop(struct tg3 *tp)

static void tg3_phy_fini(struct tg3 *tp)

static int tg3_phy_set_extloopbk(struct tg3 *tp)
	err = tg3_phy_auxctl_write(tp,
	err = tg3_phy_auxctl_read(tp,
	err = tg3_phy_auxctl_write(tp,

static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)

static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
		tg3_phy_fet_toggle_apd(tp, enable);

static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
		if (!tg3_readphy(tp, reg, &phy)) {
			tg3_writephy(tp, reg, phy);
		ret = tg3_phy_auxctl_read(tp,
			tg3_phy_auxctl_write(tp,

static void tg3_phy_set_wirespeed(struct tg3 *tp)

static void tg3_phy_apply_otp(struct tg3 *tp)

static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
	    current_link_up == 1 &&
	if (current_link_up == 1 &&

static void tg3_phy_eee_enable(struct tg3 *tp)

static int tg3_wait_macro_done(struct tg3 *tp)
		if ((tmp32 & 0x1000) == 0)
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
	static const u32 test_pat[4][6] = {
		{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
		{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
		{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
		{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	for (chan = 0; chan < 4; chan++) {
			     (chan * 0x2000) | 0x0200);
		for (i = 0; i < 6; i++)
		if (tg3_wait_macro_done(tp)) {
			     (chan * 0x2000) | 0x0200);
		if (tg3_wait_macro_done(tp)) {
		if (tg3_wait_macro_done(tp)) {
		for (i = 0; i < 6; i += 2) {
			    tg3_wait_macro_done(tp)) {
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {

static int tg3_phy_reset_chanpat(struct tg3 *tp)
	for (chan = 0; chan < 4; chan++) {
			     (chan * 0x2000) | 0x0200);
		for (i = 0; i < 6; i++)
		if (tg3_wait_macro_done(tp))

static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
	u32 reg32, phy9_orig;
		err = tg3_bmcr_reset(tp);
		tg3_phydsp_write(tp, 0x8005, 0x0800);
		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	tg3_phydsp_write(tp, 0x8005, 0x0000);
static int tg3_phy_reset(struct tg3 *tp)
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		tg3_link_report(tp);
		err = tg3_phy_reset_5703_4_5(tp);
		     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	err = tg3_bmcr_reset(tp);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
	tg3_phy_apply_otp(tp);
		tg3_phy_toggle_apd(tp, true);
		tg3_phy_toggle_apd(tp, false);
			tg3_phydsp_write(tp, 0x201f, 0x2aaa);
			tg3_phydsp_write(tp, 0x000a, 0x0323);
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		err = tg3_phy_auxctl_read(tp,
	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
#define TG3_GPIO_MSG_DRVR_PRES		0x00000001
#define TG3_GPIO_MSG_NEED_VAUX		0x00000002
#define TG3_GPIO_MSG_MASK		(TG3_GPIO_MSG_DRVR_PRES | \
					 TG3_GPIO_MSG_NEED_VAUX)
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 12))
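/* Each PCI function gets a 4-bit field in the GPIO message word, so the
 * _ALL_ masks OR the per-function bit into every field (shifts 0, 4, 8, 12).
 * This lets any one port see whether another port still needs Vaux power.
 */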
static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
	status |= (newstat << shift);

static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)

static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)

static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
		u32 grc_local_ctrl = 0;

static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
	msg = tg3_set_function_status(tp, msg);
		tg3_pwrsrc_switch_to_vaux(tp);
		tg3_pwrsrc_die_with_vmain(tp);

static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
	bool need_vaux = false;
		tg3_frob_aux_power_5717(tp, include_wol ?
					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
		dev_peer = pci_get_drvdata(tp->pdev_peer);
			struct tg3 *tp_peer = netdev_priv(dev_peer);
			if (tg3_flag(tp_peer, INIT_COMPLETE))
			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
		tg3_pwrsrc_switch_to_vaux(tp);
		tg3_pwrsrc_die_with_vmain(tp);

static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)

static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
	} else if (do_low_power) {
static int tg3_nvram_lock(struct tg3 *tp)
		for (i = 0; i < 8000; i++) {

static void tg3_nvram_unlock(struct tg3 *tp)

static void tg3_enable_nvram_access(struct tg3 *tp)

static void tg3_disable_nvram_access(struct tg3 *tp)

static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
				       u32 offset, u32 *val)
	for (i = 0; i < 1000; i++) {

#define NVRAM_CMD_TIMEOUT	10000

static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
	if (i == NVRAM_CMD_TIMEOUT)
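/* NVRAM commands are posted to the NVRAM command register and then polled
 * for completion; NVRAM_CMD_TIMEOUT bounds the number of polling iterations
 * before tg3_nvram_exec_cmd() gives up with -EBUSY.
 */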
static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&

static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&

static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
		return tg3_nvram_read_using_eeprom(tp, offset, val);
	offset = tg3_nvram_phys_addr(tp, offset);
	ret = tg3_nvram_lock(tp);
	tg3_enable_nvram_access(tp);
	tg3_disable_nvram_access(tp);
	tg3_nvram_unlock(tp);

static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
	int res = tg3_nvram_read(tp, offset, &v);

static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
					      u32 offset, u32 len, u8 *buf)
	for (i = 0; i < len; i += 4) {
		memcpy(&data, buf + i, 4);
		for (j = 0; j < 1000; j++) {
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
					    u8 *buf)
	u32 pagemask = pagesize - 1;
	phy_addr = offset & ~pagemask;
		for (j = 0; j < pagesize; j += 4) {
			ret = tg3_nvram_read_be32(tp, phy_addr + j,
		page_off = offset & pagemask;
		memcpy(tmp + page_off, buf, size);
		offset = offset + (pagesize - page_off);
		tg3_enable_nvram_access(tp);
		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
		for (j = 0; j < pagesize; j += 4) {
			data = *((__be32 *) (tmp + j));
			else if (j == (pagesize - 4))
			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
		tg3_nvram_exec_cmd(tp, nvram_cmd);

static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
					  u8 *buf)
	for (i = 0; i < len; i += 4, offset += 4) {
		memcpy(&data, buf + i, 4);
		phy_addr = tg3_nvram_phys_addr(tp, offset);
		if (page_off == 0 || i == 0)
		    (nvram_cmd & NVRAM_CMD_FIRST)) {
			ret = tg3_nvram_exec_cmd(tp, cmd);
		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
		ret = tg3_nvram_lock(tp);
		tg3_enable_nvram_access(tp);
			ret = tg3_nvram_write_block_buffered(tp, offset, len,
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {

#define RX_CPU_SCRATCH_BASE	0x30000
#define RX_CPU_SCRATCH_SIZE	0x04000
#define TX_CPU_SCRATCH_BASE	0x34000
#define TX_CPU_SCRATCH_SIZE	0x04000
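/* The RX and TX on-chip CPUs each get a 16 KiB scratch window in NIC
 * memory; tg3_load_firmware_cpu() below halts the CPU, zeroes its scratch
 * area, and copies the firmware image in one word at a time before the
 * CPU is restarted at the firmware entry point.
 */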
static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
		for (i = 0; i < 10000; i++) {
		for (i = 0; i < 10000; i++) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",

static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
				 u32 cpu_scratch_base, int cpu_scratch_size,
	int err, lock_err, i;
		  "%s: Trying to load TX cpu firmware which is 5705\n",
		write_op = tg3_write_mem;
		write_op = tg3_write_indirect_reg32;
	lock_err = tg3_nvram_lock(tp);
	err = tg3_halt_cpu(tp, cpu_base);
	tg3_nvram_unlock(tp);
	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
		write_op(tp, cpu_scratch_base + i, 0);
	for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +

static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
	fw_data = (void *)tp->fw->data;
	for (i = 0; i < 5; i++) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,

static int tg3_load_tso_firmware(struct tg3 *tp)
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	fw_data = (void *)tp->fw->data;
	cpu_scratch_size = tp->fw_len;
	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
	for (i = 0; i < 5; i++) {
		  "%s fails to set CPU PC, is %08x should be %08x\n",
static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
	addr_high = ((tp->dev->dev_addr[0] << 8) |
		     tp->dev->dev_addr[1]);
	addr_low = ((tp->dev->dev_addr[2] << 24) |
		    (tp->dev->dev_addr[3] << 16) |
		    (tp->dev->dev_addr[4] << 8) |
		    (tp->dev->dev_addr[5] << 0));
	for (i = 0; i < 4; i++) {
		if (i == 1 && skip_mac_1)
		for (i = 0; i < 12; i++) {
		addr_high = (tp->dev->dev_addr[0] +
			     tp->dev->dev_addr[1] +
			     tp->dev->dev_addr[2] +
			     tp->dev->dev_addr[3] +
			     tp->dev->dev_addr[4] +
			     tp->dev->dev_addr[5]) &

static void tg3_enable_register_access(struct tg3 *tp)
	pci_write_config_dword(tp->pdev,

static int tg3_power_up(struct tg3 *tp)
	tg3_enable_register_access(tp);
		tg3_pwrsrc_switch_to_vmain(tp);
		netdev_err(tp->dev, "Transition to D0 failed\n");

static int tg3_setup_phy(struct tg3 *, int);
static int tg3_power_down_prepare(struct tg3 *tp)
	bool device_should_wake, do_low_power;
	tg3_enable_register_access(tp);
	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
	do_low_power = false;
	if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
				do_low_power = true;
			do_low_power = true;
		tg3_setup_phy(tp, 0);
	} else if (!tg3_flag(tp, ENABLE_ASF)) {
		for (i = 0; i < 200; i++) {
	if (device_should_wake) {
			tg3_phy_auxctl_write(tp,
			u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
			if (tg3_5700_link_polarity(tp, speed))
		if (!tg3_flag(tp, WOL_SPEED_100MB) &&
	} else if (tg3_flag(tp, 5780_CLASS) ||
		u32 newbits1, newbits2;
	} else if (tg3_flag(tp, 5705_PLUS)) {
	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
		tg3_power_down_phy(tp, do_low_power);
	tg3_frob_aux_power(tp, true);
		u32 val = tr32(0x7d00);
		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
			err = tg3_nvram_lock(tp);
			tg3_nvram_unlock(tp);

static void tg3_power_down(struct tg3 *tp)
	tg3_power_down_prepare(tp);

static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val,
					 u16 *speed, u8 *duplex)

static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
	new_adv |= mii_advertise_flowctrl(flowctrl);
		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
static void tg3_phy_copper_begin(struct tg3 *tp)
		tg3_phy_autoneg_cfg(tp, adv, fc);
		u32 bmcr, orig_bmcr;
		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			for (i = 0; i < 1500; i++) {
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||

static int tg3_init_5401phy_dsp(struct tg3 *tp)
	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);

static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
	if ((*lcladv & advmsk) != tgtadv)
		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
		if (tg3_ctrl != tgtadv)

static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
	if (tg3_readphy(tp, MII_LPA, rmtadv))
	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
	int current_link_up;
	u32 lcl_adv, rmt_adv;
	    netif_carrier_ok(tp->dev)) {
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
			err = tg3_init_5401phy_dsp(tp);
		for (i = 0; i < 1000; i++) {
			if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
			    (bmsr & BMSR_LSTATUS)) {
		    !(bmsr & BMSR_LSTATUS) &&
			err = tg3_phy_reset(tp);
			err = tg3_init_5401phy_dsp(tp);
		tg3_writephy(tp, 0x15, 0x0a75);
	current_link_up = 0;
		err = tg3_phy_auxctl_read(tp,
		if (!err && !(val & (1 << 10))) {
			tg3_phy_auxctl_write(tp,
	for (i = 0; i < 100; i++) {
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
	if (bmsr & BMSR_LSTATUS) {
		for (i = 0; i < 2000; i++) {
		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
			for (i = 0; i < 200; i++) {
				if (tg3_readphy(tp, MII_BMCR, &bmcr))
				if (bmcr && bmcr != 0x7fff)
			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
				current_link_up = 1;
			if (!(bmcr & BMCR_ANENABLE) &&
				current_link_up = 1;
		if (current_link_up == 1 &&
			if (!tg3_readphy(tp, reg, &val) && (val & bit))
		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
		tg3_phy_copper_begin(tp);
		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
			current_link_up = 1;
	if (current_link_up == 1) {
	if (current_link_up == 1 &&
	    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
	tg3_phy_eee_adjust(tp, current_link_up);
	if (tg3_flag(tp, USE_LINKCHG_REG)) {
	    current_link_up == 1 &&
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
		tg3_link_report(tp);
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06

#define ANEG_TIMER_ENAB		2
#define ANEG_FAILED		-1

#define ANEG_STATE_SETTLE_TIME	10000
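/* tg3_fiber_aneg_smachine() below implements IEEE 802.3 clause 37
 * autonegotiation in software for fiber parts: the ANEG_STATE_* values are
 * its states, the MR_* bits mirror the management-register view of the
 * process, and the ANEG_CFG_* bits form the transmitted config code word.
 */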
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneg_state *ap)
	unsigned long delta;
	switch (ap->state) {
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)

static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.cur_time = 0;
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
	*txflags = aninfo.txconfig;
	*rxflags = aninfo.flags;

static void tg3_init_bcm8002(struct tg3 *tp)
	tg3_writephy(tp, 0x16, 0x8007);
	for (i = 0; i < 500; i++)
	tg3_writephy(tp, 0x10, 0x8411);
	tg3_writephy(tp, 0x11, 0x0a10);
	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);
	tg3_writephy(tp, 0x13, 0x0400);
	tg3_writephy(tp, 0x13, 0x0000);
	tg3_writephy(tp, 0x11, 0x0a50);
	tg3_writephy(tp, 0x11, 0x0a10);
	for (i = 0; i < 15000; i++)
	tg3_writephy(tp, 0x10, 0x8011);
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;
	expected_sg_dig_ctrl = 0;
	current_link_up = 0;
			u32 val = serdes_cfg;
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
		     MAC_STATUS_PCS_SYNCED)) {
			current_link_up = 1;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv = 0, remote_adv = 0;
				mii_adv_to_ethtool_adv_x(remote_adv);
			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
				u32 val = serdes_cfg;
			if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				tg3_setup_flow_control(tp, 0, 0);
				current_link_up = 1;
				goto restart_autoneg;
	return current_link_up;

static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
	int current_link_up = 0;
	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		u32 txflags, rxflags;
		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;
				mii_adv_to_ethtool_adv_x(remote_adv);
			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
		for (i = 0; i < 30; i++) {
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
			current_link_up = 1;
		tg3_setup_flow_control(tp, 0, 0);
		current_link_up = 1;
	return current_link_up;
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
	u16 orig_active_speed;
	u8 orig_active_duplex;
	int current_link_up;
	orig_active_duplex = tp->link_config.active_duplex;
	    netif_carrier_ok(tp->dev) &&
		mac_status &= (MAC_STATUS_PCS_SYNCED |
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
		tg3_init_bcm8002(tp);
	current_link_up = 0;
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
	tp->napi[0].hw_status->status =
	for (i = 0; i < 100; i++) {
		if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
			current_link_up = 0;
	if (current_link_up == 1) {
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
		tg3_link_report(tp);
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
	int current_link_up, err = 0;
	u32 local_adv, remote_adv;
	current_link_up = 0;
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
		bmsr &= ~BMSR_LSTATUS;
	err |= tg3_readphy(tp, MII_BMCR, &bmcr);
			ADVERTISE_1000XPAUSE |
			ADVERTISE_1000XPSE_ASYM |
		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
		if (new_bmcr != bmcr) {
			if (netif_carrier_ok(tp->dev)) {
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
				bmsr &= ~BMSR_LSTATUS;
		if (bmsr & BMSR_LSTATUS) {
			current_link_up = 1;
			if (bmcr & BMCR_ANENABLE) {
				err |= tg3_readphy(tp, MII_LPA, &remote_adv);
				common = local_adv & remote_adv;
					mii_adv_to_ethtool_adv_x(remote_adv);
				} else if (!tg3_flag(tp, 5780_CLASS)) {
					current_link_up = 0;
	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
		tg3_link_report(tp);

static void tg3_serdes_parallel_detect(struct tg3 *tp)
	if (!netif_carrier_ok(tp->dev) &&
		if (bmcr & BMCR_ANENABLE) {
			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				bmcr &= ~BMCR_ANENABLE;
	} else if (netif_carrier_ok(tp->dev) &&
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
		err = tg3_setup_fiber_phy(tp, force_reset);
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
		err = tg3_setup_copper_phy(tp, force_reset);
	if (netif_carrier_ok(tp->dev)) {
		     tp->coal.stats_block_coalesce_usecs);
	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		if (!netif_carrier_ok(tp->dev))

static inline int tg3_irq_sync(struct tg3 *tp)

static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
	dst = (u32 *)((u8 *)dst + off);
	for (i = 0; i < len; i += sizeof(u32))
		*dst++ = tr32(off + i);
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);

static void tg3_dump_state(struct tg3 *tp)
		netdev_err(tp->dev, "Failed allocating register dump buffer\n");
			regs[i / sizeof(u32)] = tr32(i);
		tg3_dump_legacy_regs(tp, regs);
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	for (i = 0; i < tp->irq_cnt; i++) {
			   "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",

static void tg3_tx_recover(struct tg3 *tp)
	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");
	spin_lock(&tp->lock);
	spin_unlock(&tp->lock);

static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
static void tg3_tx(struct tg3_napi *tnapi)
	struct tg3 *tp = tnapi->tp;
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	unsigned int pkts_compl = 0, bytes_compl = 0;
	txq = netdev_get_tx_queue(tp->dev, index);
	while (sw_idx != hw_idx) {
		pci_unmap_single(tp->pdev,
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			pci_unmap_page(tp->pdev,
				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
		bytes_compl += skb->len;
	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
	if (unlikely(netif_tx_queue_stopped(txq) &&
		if (netif_tx_queue_stopped(txq) &&
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);

static void tg3_frag_free(bool is_frag, void *data)

static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			     u32 opaque_key, u32 dest_idx_unmasked,
			     unsigned int *frag_size)
	switch (opaque_key) {
		desc = &tpr->rx_std[dest_idx];
		desc = &tpr->rx_jmb[dest_idx].std;
	*frag_size = skb_size;
	mapping = pci_map_single(tp->pdev,
	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
		tg3_frag_free(skb_size <= PAGE_SIZE, data);

static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
	struct tg3 *tp = tnapi->tp;
	switch (opaque_key) {
		dest_desc = &dpr->rx_std[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		src_desc = &spr->rx_jmb[src_idx].std;
static int tg3_rx(struct tg3_napi *tnapi, int budget)
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		u32 opaque_key, desc_idx, *post_ptr;
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			post_ptr = &std_prod_idx;
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			post_ptr = &jmb_prod_idx;
			goto next_pkt_nopost;
		work_mask |= opaque_key;
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
			unsigned int frag_size;
			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
						     *post_ptr, &frag_size);
			pci_unmap_single(tp->pdev, dma_addr, skb_size,
				tg3_frag_free(frag_size != 0, data);
				goto drop_it_no_recycle;
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
			skb = netdev_alloc_skb(tp->dev,
				goto drop_it_no_recycle;
			skb_checksum_none_assert(skb);
				goto drop_it_no_recycle;
			__vlan_hwaccel_put_tag(skb,
	if (sw_idx == hw_idx) {
	} else if (work_mask) {
		if (tnapi != &tp->napi[1]) {
			napi_schedule(&tp->napi[1].napi);
static void tg3_poll_link(struct tg3 *tp)
		spin_lock(&tp->lock);
			tg3_setup_phy(tp, 0);
		spin_unlock(&tp->lock);

static int tg3_rx_prodring_xfer(struct tg3 *tp,
	u32 si, di, cpycnt, src_prod_idx;
		cpycnt = min(cpycnt,
		for (i = di; i < di + cpycnt; i++) {
		for (i = 0; i < cpycnt; i++, di++, si++) {
		cpycnt = min(cpycnt,
		for (i = di; i < di + cpycnt; i++) {
		for (i = 0; i < cpycnt; i++, di++, si++) {
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;

static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
	struct tg3 *tp = tnapi->tp;
		work_done += tg3_rx(tnapi, budget - work_done);
	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		for (i = 1; i <= tp->rxq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);
static inline void tg3_reset_task_schedule(struct tg3 *tp)

static inline void tg3_reset_task_cancel(struct tg3 *tp)

static int tg3_poll_msix(struct napi_struct *napi, int budget)
	struct tg3 *tp = tnapi->tp;
		work_done = tg3_poll_work(tnapi, work_done, budget);
	tg3_reset_task_schedule(tp);

static void tg3_process_error(struct tg3 *tp)
	bool real_error = false;
		netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
		netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
		netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
	tg3_reset_task_schedule(tp);
static int tg3_poll(struct napi_struct *napi, int budget)
	struct tg3 *tp = tnapi->tp;
		tg3_process_error(tp);
		work_done = tg3_poll_work(tnapi, work_done, budget);
		if (likely(!tg3_has_work(tnapi))) {
			tg3_int_reenable(tnapi);
	tg3_reset_task_schedule(tp);

static void tg3_napi_disable(struct tg3 *tp)
	for (i = tp->irq_cnt - 1; i >= 0; i--)
		napi_disable(&tp->napi[i].napi);

static void tg3_napi_enable(struct tg3 *tp)
	for (i = 0; i < tp->irq_cnt; i++)
		napi_enable(&tp->napi[i].napi);

static void tg3_napi_init(struct tg3 *tp)
	for (i = 1; i < tp->irq_cnt; i++)

static void tg3_napi_fini(struct tg3 *tp)
	for (i = 0; i < tp->irq_cnt; i++)
static inline void tg3_netif_stop(struct tg3 *tp)
	tg3_napi_disable(tp);
	netif_tx_disable(tp->dev);

static inline void tg3_netif_start(struct tg3 *tp)
	netif_tx_wake_all_queues(tp->dev);
	tg3_napi_enable(tp);
	tg3_enable_ints(tp);

static void tg3_irq_quiesce(struct tg3 *tp)
	for (i = 0; i < tp->irq_cnt; i++)

static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
	spin_lock_bh(&tp->lock);
		tg3_irq_quiesce(tp);

static inline void tg3_full_unlock(struct tg3 *tp)
	spin_unlock_bh(&tp->lock);
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
	struct tg3 *tp = tnapi->tp;
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

static irqreturn_t tg3_msi(int irq, void *dev_id)
	struct tg3 *tp = tnapi->tp;
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

static irqreturn_t tg3_interrupt(int irq, void *dev_id)
	struct tg3 *tp = tnapi->tp;
	unsigned int handled = 1;
	if (tg3_flag(tp, CHIP_RESETTING) ||
	if (tg3_irq_sync(tp))
	if (likely(tg3_has_work(tnapi))) {
		napi_schedule(&tnapi->napi);

static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
	struct tg3 *tp = tnapi->tp;
	unsigned int handled = 1;
	if (tg3_flag(tp, CHIP_RESETTING) ||
	if (tg3_irq_sync(tp))
	napi_schedule(&tnapi->napi);

static irqreturn_t tg3_test_isr(int irq, void *dev_id)
	struct tg3 *tp = tnapi->tp;
	tg3_disable_ints(tp);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void tg3_poll_controller(struct net_device *dev)
	struct tg3 *tp = netdev_priv(dev);
	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
#endif

static void tg3_tx_timeout(struct net_device *dev)
	struct tg3 *tp = netdev_priv(dev);
		netdev_err(dev, "transmit timed out, resetting\n");
	tg3_reset_task_schedule(tp);

static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
	u32 base = (u32) mapping & 0xffffffff;

	return (base > 0xffffdcc0) && (base + len + 8 < base);
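/* Older Tigon3 chips cannot DMA a buffer that straddles a 4 GB boundary.
 * The test works on the low 32 bits of the address: if base + len + 8
 * wraps below base, the transfer would cross the boundary. For example,
 * base = 0xffffff00 with len = 512 wraps, so that buffer must be bounced.
 */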
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)

static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
				 dma_addr_t mapping, u32 len, u32 flags,
				 u32 mss, u32 vlan)
	txbd->addr_lo = ((u64) mapping & 0xffffffff);

static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
	struct tg3 *tp = tnapi->tp;
	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
	if (tg3_4g_overflow_test(map, len))
	if (tg3_40bit_overflow_test(tp, map, len))
		u32 prvidx = *entry;
		while (len > tp->dma_limit && *budget) {
			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
				      frag_len, tmp_flag, mss, vlan);
		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
			      len, flags, mss, vlan);
			tnapi->tx_buffers[prvidx].fragmented = false;
		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
			      len, flags, mss, vlan);
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
	pci_unmap_single(tnapi->tp->pdev,
	for (i = 0; i <= last; i++) {
		pci_unmap_page(tnapi->tp->pdev,

static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff **pskb,
				       u32 *entry, u32 *budget,
				       u32 base_flags, u32 mss, u32 vlan)
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb, *skb = *pskb;
		int more_headroom = 4 - ((unsigned long)skb->data & 3);
				 skb_headroom(skb) + more_headroom,
	new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
	if (pci_dma_mapping_error(tp->pdev, new_addr)) {
		dev_kfree_skb(new_skb);
		u32 save_entry = *entry;
		if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
				    new_skb->len, base_flags,
			tg3_tx_skb_unmap(tnapi, save_entry, -1);
			dev_kfree_skb(new_skb);

static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
		netif_stop_queue(tp->dev);
		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
		netif_wake_queue(tp->dev);
		goto tg3_tso_bug_end;
		tg3_start_xmit(nskb, tp->dev);
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss, vlan = 0;
	int i = -1, would_hit_hwbug;
	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	budget = tg3_tx_avail(tnapi);
	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);
				   "BUG! Tx Ring full when queue awake!\n");
	entry = tnapi->tx_prod;
	mss = skb_shinfo(skb)->gso_size;
		if (skb_header_cloned(skb) &&
		tcp_opt_len = tcp_optlen(skb);
		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
		if (!skb_is_gso_v6(skb)) {
			return tg3_tso_bug(tp, skb);
		tcp_hdr(skb)->check = 0;
			mss |= (hdr_len & 0xc) << 12;
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
			mss |= hdr_len << 9;
			if (tcp_opt_len || iph->ihl > 5) {
				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			if (tcp_opt_len || iph->ihl > 5) {
				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
	len = skb_headlen(skb);
	if (pci_dma_mapping_error(tp->pdev, mapping))
	would_hit_hwbug = 0;
		would_hit_hwbug = 1;
	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
		would_hit_hwbug = 1;
	} else if (skb_shinfo(skb)->nr_frags > 0) {
		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			len = skb_frag_size(frag);
			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
				would_hit_hwbug = 1;
	if (would_hit_hwbug) {
		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
		entry = tnapi->tx_prod;
		budget = tg3_tx_avail(tnapi);
		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
						base_flags, mss, vlan))
	skb_tx_timestamp(skb);
	netdev_tx_sent_queue(txq, skb->len);
	tnapi->tx_prod = entry;
		netif_tx_stop_queue(txq);
			netif_tx_wake_queue(txq);
	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
static void tg3_mac_loopback(struct tg3 *tp, bool enable)

static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
	u32 val, bmcr, mac_mode, ptest = 0;
	tg3_phy_toggle_apd(tp, false);
	tg3_phy_toggle_automdix(tp, 0);
	if (extlpbk && tg3_phy_set_extloopbk(tp))

static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
	struct tg3 *tp = netdev_priv(dev);
		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, true);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, false);
			tg3_setup_phy(tp, 1);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode disabled.\n");

static int tg3_set_features(struct net_device *dev, netdev_features_t features)
	struct tg3 *tp = netdev_priv(dev);
	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
		tg3_set_loopback(dev, features);
static void tg3_rx_prodring_free(struct tg3 *tp,
	if (tpr != &tp->napi[0].prodring) {

static int tg3_rx_prodring_alloc(struct tg3 *tp,
	u32 i, rx_pkt_dma_sz;
	if (tpr != &tp->napi[0].prodring) {
		rxd->opaque = (RXD_OPAQUE_RING_STD |
		unsigned int frag_size;
		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		unsigned int frag_size;
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
	tg3_rx_prodring_free(tp, tpr);

static void tg3_rx_prodring_fini(struct tg3 *tp,

static int tg3_rx_prodring_init(struct tg3 *tp,
	tg3_rx_prodring_fini(tp, tpr);

static void tg3_free_rings(struct tg3 *tp)
	for (j = 0; j < tp->irq_cnt; j++) {
		tg3_rx_prodring_free(tp, &tnapi->prodring);
			tg3_tx_skb_unmap(tnapi, i,
					 skb_shinfo(skb)->nr_frags - 1);
		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));

static int tg3_init_rings(struct tg3 *tp)
	for (i = 0; i < tp->irq_cnt; i++) {
		if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {

static void tg3_mem_tx_release(struct tg3 *tp)
	for (i = 0; i < tp->irq_max; i++) {

static int tg3_mem_tx_acquire(struct tg3 *tp)
	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
	tg3_mem_tx_release(tp);
static void tg3_mem_rx_release(struct tg3 *tp)
	for (i = 0; i < tp->irq_max; i++) {
		tg3_rx_prodring_fini(tp, &tnapi->prodring);

static int tg3_mem_rx_acquire(struct tg3 *tp)
	for (i = 0; i < limit; i++) {
		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
		if (!i && tg3_flag(tp, ENABLE_RSS))
	tg3_mem_rx_release(tp);

static void tg3_free_consistent(struct tg3 *tp)
	for (i = 0; i < tp->irq_cnt; i++) {
	tg3_mem_rx_release(tp);
	tg3_mem_tx_release(tp);

static int tg3_alloc_consistent(struct tg3 *tp)
	for (i = 0; i < tp->irq_cnt; i++) {
			prodptr = &sblk->idx[0].rx_producer;
	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
	tg3_free_consistent(tp);

#define MAX_WAIT_CNT	1000
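/* tg3_stop_block() and tg3_abort_hw() below poll a block's enable bit after
 * clearing it; MAX_WAIT_CNT bounds those polls so a wedged block cannot
 * hang the shutdown path indefinitely.
 */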
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit,
			  int silent)
		if ((val & enable_bit) == 0)
	if (i == MAX_WAIT_CNT && !silent) {
			   "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",

static int tg3_abort_hw(struct tg3 *tp, int silent)
	tg3_disable_ints(tp);
	if (i >= MAX_WAIT_CNT) {
			   "%s timed out, TX_MODE_ENABLE will not clear "
	for (i = 0; i < tp->irq_cnt; i++) {

static void tg3_save_pci_state(struct tg3 *tp)

static void tg3_restore_pci_state(struct tg3 *tp)
		pci_read_config_word(tp->pdev,
		pci_write_config_word(tp->pdev,

static int tg3_chip_reset(struct tg3 *tp)
	tg3_save_pci_state(tp);
	if (write_op == tg3_write_flush_reg32)
	for (i = 0; i < tp->irq_cnt; i++) {
	for (i = 0; i < tp->irq_cnt; i++)
	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
		for (j = 0; j < 5000; j++)
		pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
		pci_write_config_dword(tp->pdev, 0xc4,
				       cfg_val | (1 << 15));
	tg3_restore_pci_state(tp);
		tw32(0x5000, 0x400);
		tw32(0xc4, val | (1 << 15));
	err = tg3_poll_fw(tp);
		tw32(0x7c00, val | (1 << 25));

static int tg3_halt(struct tg3 *tp, int kind, int silent)
	tg3_write_sig_pre_reset(tp, kind);
	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);
	__tg3_set_mac_addr(tp, 0);
	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);
static int tg3_set_mac_addr(struct net_device *dev, void *p)
	struct tg3 *tp = netdev_priv(dev);
	int err = 0, skip_mac_1 = 0;
	if (!is_valid_ether_addr(addr->sa_data))
	if (!netif_running(dev))
		u32 addr0_high, addr0_low, addr1_high, addr1_low;
		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
		    !(addr1_high == 0 && addr1_low == 0))
	spin_lock_bh(&tp->lock);
	__tg3_set_mac_addr(tp, skip_mac_1);
	spin_unlock_bh(&tp->lock);

static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
		      ((u64) mapping >> 32));
		      ((u64) mapping & 0xffffffff));
	for (; i < tp->txq_cnt; i++) {
	for (; i < tp->irq_max - 1; i++) {
	for (; i < limit; i++) {
	for (; i < tp->irq_max - 1; i++) {
	tg3_coal_tx_init(tp, ec);
	tg3_coal_rx_init(tp, ec);
		if (!netif_carrier_ok(tp->dev))
8397 static void tg3_rings_reset(
struct tg3 *tp)
8400 u32 stblk, txrcb, rxrcb,
limit;
8408 else if (
tg3_flag(tp, 57765_CLASS))
8437 tp->
napi[0].chk_msi_cnt = 0;
8438 tp->
napi[0].last_rx_cons = 0;
8439 tp->
napi[0].last_tx_cons = 0;
8443 for (i = 1; i < tp->
irq_max; i++) {
8444 tp->
napi[
i].tx_prod = 0;
8445 tp->
napi[
i].tx_cons = 0;
8450 tp->
napi[
i].chk_msi_cnt = 0;
8451 tp->
napi[
i].last_rx_cons = 0;
8452 tp->
napi[
i].last_tx_cons = 0;
8457 tp->
napi[0].tx_prod = 0;
8458 tp->
napi[0].tx_cons = 0;
8466 for (i = 0; i < 16; i++)
8484 (TG3_TX_RING_SIZE <<
8499 for (i = 1, tnapi++; i < tp->
irq_cnt; i++, tnapi++) {
8502 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8509 (TG3_TX_RING_SIZE <<
8524 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8526 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8543 val = min(nic_rep_thresh, host_rep_thresh);
8556 val = min(bdcache_maxcnt / 2, host_rep_thresh);
8563 static inline u32 calc_crc(unsigned char *buf, int len)
8571 for (j = 0; j < len; j++) {
8574 for (k = 0; k < 8; k++) {
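/*
 * Editor's note: calc_crc() above is a bit-at-a-time Ethernet CRC-32 over
 * the reflected polynomial 0xedb88320: each input byte is XORed into the
 * register, then each of its 8 bits is shifted out, folding the polynomial
 * back in whenever the shifted-out bit was set. A standalone sketch of the
 * same loop (assumed equivalent, not copied from the driver):
 *
 *	u32 reg = 0xffffffff;
 *	for (j = 0; j < len; j++) {
 *		reg ^= buf[j];
 *		for (k = 0; k < 8; k++)
 *			reg = (reg >> 1) ^ ((reg & 1) ? 0xedb88320 : 0);
 *	}
 *	return ~reg;
 */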
8587 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8596 static void __tg3_set_rx_mode(struct net_device *dev)
8598 struct tg3 *tp = netdev_priv(dev);
8604 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8617 tg3_set_multi(tp, 1);
8620 tg3_set_multi(tp, 0);
8624 u32 mc_filter[4] = { 0, };
8632 regidx = (bit & 0x60) >> 5;
8634 mc_filter[regidx] |= (1 << bit);
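/*
 * Editor's note: the multicast filter is a 128-bit hash table held in four
 * 32-bit registers. The high bits of the Ethernet CRC of each multicast
 * address pick one of the 128 bits: "(bit & 0x60) >> 5" selects the
 * register, the low five bits select the bit within it. A sketch of the
 * indexing, assuming "bit" has already been masked to 0..127:
 *
 *	regidx = (bit & 0x60) >> 5;        which of the four registers
 *	mc_filter[regidx] |= 1 << (bit & 0x1f);
 */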
8650 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
8655 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
8658 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
8676 if (i != TG3_RSS_INDIR_TBL_SIZE)
8677 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
8680 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
8685 while (i < TG3_RSS_INDIR_TBL_SIZE) {
8688 for (; i % 8; i++) {
8698 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8700 u32 val, rdmac_mode;
8704 tg3_disable_ints(tp);
8711 tg3_abort_hw(tp, 1);
8747 err = tg3_chip_reset(tp);
8873 val |= (1 << 26) | (1 << 28) | (1 << 29);
8882 err = tg3_init_rings(tp);
8938 } else if (tg3_flag(tp, TSO_CAPABLE)) {
8942 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8977 for (i = 0; i < 2000; i++) {
8983 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8990 tg3_setup_rxbd_thresholds(tp);
9028 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9037 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9064 tg3_rings_reset(tp);
9067 __tg3_set_mac_addr(tp, 0);
9184 for (i = 0; i < 2000; i++) {
9190 __tg3_set_coalesce(tp, &tp->coal);
9209 tg3_write_mem(tp, i, 0);
9264 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9338 if (i < TG3_NUM_RDMA_CHANNELS) {
9359 if (tg3_flag(tp, LRG_PROD_RING_CAP))
9374 err = tg3_load_5701_a0_firmware_fix(tp);
9380 err = tg3_load_tso_firmware(tp);
9401 tg3_rss_write_indir_tbl(tp);
9485 err = tg3_setup_phy(tp, 0);
9502 __tg3_set_rx_mode(tp->dev);
9565 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9567 tg3_switch_clocks(tp);
9571 return tg3_reset_hw(tp, reset_phy);
9574 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
9581 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
9586 memset(ocir, 0, TG3_OCIR_LEN);
9595 struct net_device *netdev = pci_get_drvdata(pdev);
9596 struct tg3 *tp = netdev_priv(netdev);
9600 spin_lock_bh(&tp->lock);
9601 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
9602 sizeof(temperature));
9603 spin_unlock_bh(&tp->lock);
9604 return sprintf(buf, "%u\n", temperature);
9615 static struct attribute *tg3_attributes[] = {
9616 &sensor_dev_attr_temp1_input.dev_attr.attr,
9617 &sensor_dev_attr_temp1_crit.dev_attr.attr,
9618 &sensor_dev_attr_temp1_max.dev_attr.attr,
9623 .attrs = tg3_attributes,
9626 static void tg3_hwmon_close(struct tg3 *tp)
9628 if (tp->hwmon_dev) {
9630 tp->hwmon_dev = NULL;
9635 static void tg3_hwmon_open(struct tg3 *tp)
9640 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
9642 tg3_sd_scan_scratchpad(tp, ocirs);
9648 size += ocirs[i].src_hdr_length;
9649 size += ocirs[i].src_data_length;
9658 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
9663 if (IS_ERR(tp->hwmon_dev)) {
9664 tp->hwmon_dev = NULL;
9665 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
9671 #define TG3_STAT_ADD32(PSTAT, REG) \
9672 do { u32 __val = tr32(REG); \
9673 (PSTAT)->low += __val; \
9674 if ((PSTAT)->low < __val) \
9675 (PSTAT)->high += 1; \
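/*
 * Editor's note: TG3_STAT_ADD32 folds a free-running 32-bit hardware
 * counter into a 64-bit software total. If the low word ends up smaller
 * than the value just added, the 32-bit addition wrapped, so a carry is
 * propagated into the high word. Equivalent sketch with a hypothetical
 * 64-bit counter struct:
 *
 *	static inline void stat_add32(struct u64_ctr *st, u32 hw)
 *	{
 *		st->low += hw;
 *		if (st->low < hw)        unsigned wraparound => carry
 *			st->high += 1;
 *	}
 */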
9678 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9682 if (!netif_carrier_ok(tp->dev))
9743 static void tg3_chk_missed_msi(struct tg3 *tp)
9747 for (i = 0; i < tp->irq_cnt; i++) {
9750 if (tg3_has_work(tnapi)) {
9766 static void tg3_timer(unsigned long __opaque)
9768 struct tg3 *tp = (struct tg3 *) __opaque;
9773 spin_lock(&tp->lock);
9777 tg3_chk_missed_msi(tp);
9779 if (!tg3_flag(tp, TAGGED_STATUS)) {
9793 spin_unlock(&tp->lock);
9794 tg3_reset_task_schedule(tp);
9802 tg3_periodic_fetch_stats(tp);
9805 tg3_phy_eee_enable(tp);
9807 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9821 tg3_setup_phy(tp, 0);
9822 } else if (tg3_flag(tp, POLL_SERDES)) {
9826 if (netif_carrier_ok(tp->dev) &&
9827 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9830 if (!netif_carrier_ok(tp->dev) &&
9831 (mac_stat & (MAC_STATUS_PCS_SYNCED |
9844 tg3_setup_phy(tp, 0);
9848 tg3_serdes_parallel_detect(tp);
9873 tg3_wait_for_event_ack(tp);
9881 tg3_generate_fw_event(tp);
9886 spin_unlock(&tp->lock);
9910 tp->timer.function = tg3_timer;
9913 static void tg3_timer_start(struct tg3 *tp)
9922 static void tg3_timer_stop(struct tg3 *tp)
9930 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
9936 err = tg3_init_hw(tp, reset_phy);
9939 "Failed to re-initialize device, aborting\n");
9941 tg3_full_unlock(tp);
9944 tg3_napi_enable(tp);
9946 tg3_full_lock(tp, 0);
9956 tg3_full_lock(tp, 0);
9958 if (!netif_running(tp->dev)) {
9960 tg3_full_unlock(tp);
9964 tg3_full_unlock(tp);
9970 tg3_full_lock(tp, 1);
9972 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
9980 err = tg3_init_hw(tp, 1);
9984 tg3_netif_start(tp);
9987 tg3_full_unlock(tp);
9995 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9998 unsigned long flags;
10003 name = tp->dev->name;
10013 fn = tg3_msi_1shot;
10016 fn = tg3_interrupt;
10018 fn = tg3_interrupt_tagged;
10025 static int tg3_test_interrupt(struct tg3 *tp)
10029 int err, i, intr_ok = 0;
10032 if (!netif_running(dev))
10035 tg3_disable_ints(tp);
10054 tg3_enable_ints(tp);
10059 for (i = 0; i < 5; i++) {
10060 u32 int_mbox, misc_host_ctrl;
10065 if ((int_mbox != 0) ||
10078 tg3_disable_ints(tp);
10082 err = tg3_request_irq(tp, 0);
10102 static int tg3_test_msi(struct tg3 *tp)
10117 err = tg3_test_interrupt(tp);
10129 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
10130 "to INTx mode. Please report this failure to the PCI "
10131 "maintainer and include system chipset information\n");
10138 tp->napi[0].irq_vec = tp->pdev->irq;
10140 err = tg3_request_irq(tp, 0);
10147 tg3_full_lock(tp, 1);
10150 err = tg3_init_hw(tp, 1);
10152 tg3_full_unlock(tp);
10160 static int tg3_request_firmware(struct tg3 *tp)
10165 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
10170 fw_data = (void *)tp->fw->data;
10178 if (tp->fw_len < (tp->fw->size - 12)) {
10179 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
10191 static u32 tg3_irq_count(struct tg3 *tp)
10207 static bool tg3_enable_msix(struct tg3 *tp)
10226 tp->irq_cnt = tg3_irq_count(tp);
10228 for (i = 0; i < tp->irq_max; i++) {
10229 msix_ent[i].entry = i;
10230 msix_ent[i].vector = 0;
10236 } else if (rc != 0) {
10239 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
10247 for (i = 0; i < tp->irq_max; i++)
10248 tp->napi[i].irq_vec = msix_ent[i].vector;
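/*
 * Editor's note: the MSI-X setup above follows the standard PCI pattern:
 * fill one struct msix_entry per desired vector (.entry is the index the
 * device uses, .vector is filled in by the core), request them all at
 * once, then copy each granted vector into the per-queue NAPI context so
 * tg3_request_irq() can bind one handler per ring. Sketch, with N a
 * hypothetical vector count:
 *
 *	struct msix_entry ent[N];
 *	for (i = 0; i < N; i++)
 *		ent[i].entry = i;          index as seen by the device
 *	rc = pci_enable_msix(pdev, ent, N);    ent[i].vector set on success
 *
 * If fewer vectors are granted (the "rc != 0" path), the driver retries
 * with the reduced count before falling back to single MSI or INTx.
 */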
10250 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
10268 static void tg3_ints_init(struct tg3 *tp)
10275 netdev_warn(tp->dev,
10276 "MSI without TAGGED_STATUS? Not using MSI\n");
10280 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
10282 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
10296 tp->napi[0].irq_vec = tp->pdev->irq;
10303 netif_set_real_num_rx_queues(tp->dev, 1);
10307 static void tg3_ints_fini(struct tg3 *tp)
10319 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq)
10330 tg3_rss_check_indir_tbl(tp);
10335 err = tg3_alloc_consistent(tp);
10341 tg3_napi_enable(tp);
10343 for (i = 0; i < tp->irq_cnt; i++) {
10345 err = tg3_request_irq(tp, i);
10347 for (i--; i >= 0; i--) {
10348 tnapi = &tp->napi[i];
10355 tg3_full_lock(tp, 0);
10357 err = tg3_init_hw(tp, reset_phy);
10360 tg3_free_rings(tp);
10363 tg3_full_unlock(tp);
10369 err = tg3_test_msi(tp);
10372 tg3_full_lock(tp, 0);
10374 tg3_free_rings(tp);
10375 tg3_full_unlock(tp);
10390 tg3_hwmon_open(tp);
10392 tg3_full_lock(tp, 0);
10394 tg3_timer_start(tp);
10396 tg3_enable_ints(tp);
10398 tg3_full_unlock(tp);
10400 netif_tx_start_all_queues(dev);
10406 if (dev->features & NETIF_F_LOOPBACK)
10407 tg3_set_loopback(dev, dev->features);
10412 for (i = tp->irq_cnt - 1; i >= 0; i--) {
10418 tg3_napi_disable(tp);
10420 tg3_free_consistent(tp);
10428 static void tg3_stop(struct tg3 *tp)
10432 tg3_napi_disable(tp);
10433 tg3_reset_task_cancel(tp);
10435 netif_tx_disable(tp->dev);
10437 tg3_timer_stop(tp);
10439 tg3_hwmon_close(tp);
10443 tg3_full_lock(tp, 1);
10445 tg3_disable_ints(tp);
10448 tg3_free_rings(tp);
10451 tg3_full_unlock(tp);
10453 for (i = tp->irq_cnt - 1; i >= 0; i--) {
10462 tg3_free_consistent(tp);
10467 struct tg3 *tp = netdev_priv(dev);
10471 err = tg3_request_firmware(tp);
10476 netdev_warn(tp->dev, "TSO capability disabled\n");
10478 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
10479 netdev_notice(tp->dev, "TSO capability restored\n");
10486 err = tg3_power_up(tp);
10490 tg3_full_lock(tp, 0);
10492 tg3_disable_ints(tp);
10495 tg3_full_unlock(tp);
10497 err = tg3_start(tp, true, true);
10499 tg3_frob_aux_power(tp, false);
10505 static int tg3_close(struct net_device *dev)
10507 struct tg3 *tp = netdev_priv(dev);
10515 tg3_power_down(tp);
10527 static u64 tg3_calc_crc_errors(struct tg3 *tp)
10551 #define ESTAT_ADD(member) \
10552 estats->member = old_estats->member + \
10553 get_stat64(&hw_stats->member)
10686 tg3_calc_crc_errors(tp);
10695 static int tg3_get_regs_len(struct net_device *dev)
10700 static void tg3_get_regs(struct net_device *dev,
10703 struct tg3 *tp = netdev_priv(dev);
10712 tg3_full_lock(tp, 0);
10714 tg3_dump_legacy_regs(tp, (u32 *)_p);
10716 tg3_full_unlock(tp);
10719 static int tg3_get_eeprom_len(struct net_device *dev)
10721 struct tg3 *tp = netdev_priv(dev);
10728 struct tg3 *tp = netdev_priv(dev);
10731 u32 i, offset, len, b_offset, b_count;
10740 offset = eeprom->offset;
10748 b_offset = offset & 3;
10749 b_count = 4 - b_offset;
10750 if (b_count > len) {
10754 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10757 memcpy(data, ((char *)&val) + b_offset, b_count);
10760 eeprom->len += b_count;
10764 pd = &data[eeprom->len];
10765 for (i = 0; i < (len - (len & 3)); i += 4) {
10766 ret = tg3_nvram_read_be32(tp, offset + i, &val);
10771 memcpy(pd + i, &val, 4);
10777 pd = &data[eeprom->len];
10779 b_offset = offset + len - b_count;
10780 ret = tg3_nvram_read_be32(tp, b_offset, &val);
10783 memcpy(pd, &val, b_count);
10784 eeprom->len += b_count;
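/*
 * Editor's note: NVRAM reads are 32-bit and 4-byte aligned, so an
 * arbitrary (offset, len) request is served in three parts: a leading
 * partial word (b_offset/b_count above), a run of whole words, and a
 * trailing partial word, each extracted from a be32 with memcpy().
 * Sketch of the leading-fragment arithmetic, assuming offset = 6:
 *
 *	b_offset = offset & 3;      2: request starts inside the word
 *	b_count  = 4 - b_offset;    2 usable bytes in the first word
 *	   read the word at (offset - b_offset), copy bytes [2..3] out
 */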
10791 struct tg3 *tp = netdev_priv(dev);
10793 u32 offset, len, b_offset, odd_len;
10804 offset = eeprom->offset;
10807 if ((b_offset = (offset & 3))) {
10809 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10822 len = (len + 3) & ~3;
10823 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10829 if (b_offset || odd_len) {
10836 memcpy(buf+len-4, &end, 4);
10837 memcpy(buf + b_offset, data, eeprom->len);
10840 ret = tg3_nvram_write_block(tp, offset, len, buf);
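/*
 * Editor's note: writes apply the same alignment rule in reverse. When
 * the caller's buffer does not start or end on a word boundary, the
 * surrounding words are read back first ("start"/"end" above) and the
 * user data is spliced over them, so tg3_nvram_write_block() only ever
 * sees whole aligned words: a classic read-modify-write on the
 * unaligned edges of the request.
 */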
10850 struct tg3 *tp = netdev_priv(dev);
10887 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10891 if (netif_running(dev) && netif_carrier_ok(dev)) {
10892 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10916 struct tg3 *tp = netdev_priv(dev);
10917 u32 speed = ethtool_cmd_speed(cmd);
10947 ADVERTISED_100baseT_Full |
10958 ADVERTISED_1000baseT_Full |
10960 ADVERTISED_100baseT_Full |
10979 tg3_full_lock(tp, 0);
10993 if (netif_running(dev))
10994 tg3_setup_phy(tp, 1);
10996 tg3_full_unlock(tp);
11003 struct tg3 *tp = netdev_priv(dev);
11013 struct tg3 *tp = netdev_priv(dev);
11015 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11020 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11027 struct tg3 *tp = netdev_priv(dev);
11033 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11038 spin_lock_bh(&tp->lock);
11039 if (device_may_wakeup(dp))
11043 spin_unlock_bh(&tp->lock);
11048 static u32 tg3_get_msglevel(struct net_device *dev)
11050 struct tg3 *tp = netdev_priv(dev);
11056 struct tg3 *tp = netdev_priv(dev);
11060 static int tg3_nway_reset(struct net_device *dev)
11062 struct tg3 *tp = netdev_priv(dev);
11065 if (!netif_running(dev))
11078 spin_lock_bh(&tp->lock);
11081 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
11082 ((bmcr & BMCR_ANENABLE) ||
11088 spin_unlock_bh(&tp->lock);
11096 struct tg3 *tp = netdev_priv(dev);
11099 if (tg3_flag(tp, JUMBO_RING_ENABLE))
11107 if (tg3_flag(tp, JUMBO_RING_ENABLE))
11117 struct tg3 *tp = netdev_priv(dev);
11118 int i, irq_sync = 0, err = 0;
11122 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
11128 if (netif_running(dev)) {
11130 tg3_netif_stop(tp);
11134 tg3_full_lock(tp, irq_sync);
11138 if (tg3_flag(tp, MAX_RXPEND_64) &&
11143 for (i = 0; i < tp->irq_max; i++)
11146 if (netif_running(dev)) {
11148 err = tg3_restart_hw(tp, 1);
11150 tg3_netif_start(tp);
11153 tg3_full_unlock(tp);
11155 if (irq_sync && !err)
11163 struct tg3 *tp = netdev_priv(dev);
11180 struct tg3 *tp = netdev_priv(dev);
11218 if (oldadv != newadv) {
11237 tg3_setup_flow_control(tp, 0, 0);
11247 if (netif_running(dev)) {
11248 tg3_netif_stop(tp);
11252 tg3_full_lock(tp, irq_sync);
11267 if (netif_running(dev)) {
11269 err = tg3_restart_hw(tp, 1);
11271 tg3_netif_start(tp);
11274 tg3_full_unlock(tp);
11280 static int tg3_get_sset_count(struct net_device *dev, int sset)
11295 struct tg3 *tp = netdev_priv(dev);
11300 switch (info->cmd) {
11302 if (netif_running(tp->dev))
11321 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
11324 struct tg3 *tp = netdev_priv(dev);
11332 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
11334 struct tg3 *tp = netdev_priv(dev);
11343 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
11345 struct tg3 *tp = netdev_priv(dev);
11351 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
11357 tg3_full_lock(tp, 0);
11358 tg3_rss_write_indir_tbl(tp);
11359 tg3_full_unlock(tp);
11364 static void tg3_get_channels(struct net_device *dev,
11367 struct tg3 *tp = netdev_priv(dev);
11373 if (netif_running(dev)) {
11389 static int tg3_set_channels(struct net_device *dev,
11392 struct tg3 *tp = netdev_priv(dev);
11404 if (!netif_running(dev))
11411 tg3_start(tp, true, false);
11416 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11418 switch (stringset) {
11420 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
11423 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
11431 static int tg3_set_phys_id(struct net_device *dev,
11434 struct tg3 *tp = netdev_priv(dev);
11436 if (!netif_running(tp->dev))
11466 static void tg3_get_ethtool_stats(struct net_device *dev,
11469 struct tg3 *tp = netdev_priv(dev);
11477 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
11481 u32 offset = 0, len = 0;
11484 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
11491 if (tg3_nvram_read(tp, offset, &val))
11499 if (offset != TG3_NVM_DIR_END) {
11501 if (tg3_nvram_read(tp, offset + 4, &offset))
11504 offset = tg3_nvram_logical_addr(tp, offset);
11508 if (!offset || !len) {
11518 for (i = 0; i < len; i += 4) {
11523 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
11529 unsigned int pos = 0;
11531 ptr = (u8 *)&buf[0];
11532 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
11553 #define NVRAM_TEST_SIZE 0x100
11554 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
11555 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
11556 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
11557 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
11558 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
11559 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
11560 #define NVRAM_SELFBOOT_HW_SIZE 0x20
11561 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
11563 static int tg3_test_nvram(struct tg3 *tp)
11572 if (tg3_nvram_read(tp, 0, &magic) != 0)
11614 for (i = 0, j = 0; i < size; i += 4, j++) {
11615 err = tg3_nvram_read_be32(tp, i, &buf[j]);
11624 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
11626 u8 *buf8 = (u8 *) buf, csum8 = 0;
11633 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
11636 for (i = 0; i < size; i++)
11649 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
11653 u8 *buf8 = (u8 *) buf;
11657 if ((i == 0) || (i == 8)) {
11661 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
11662 parity[k++] = buf8[i] & msk;
11664 } else if (i == 16) {
11668 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
11669 parity[k++] = buf8[i] & msk;
11672 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
11673 parity[k++] = buf8[i] & msk;
11676 data[j++] = buf8[i];
11683 if ((hw8 & 0x1) && parity[i])
11685 else if (!(hw8 & 0x1) && !parity[i])
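/*
 * Editor's note: the hardware-selfboot image uses odd parity. Bytes 0, 8
 * and 16 of the block pack one parity bit per data byte, unpacked by the
 * msk loops above; hw8 is the popcount of the corresponding data byte.
 * A set parity bit with an odd popcount, or a clear bit with an even
 * one, means the byte-plus-parity total is even, and either case fails
 * the NVRAM self-test.
 */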
11695 csum = calc_crc((unsigned char *) buf, 0x10);
11700 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
11706 buf = tg3_vpd_readblock(tp, &len);
11712 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11727 for (i = 0; i <= j; i++)
11728 csum8 += ((u8 *)buf)[i];
11742 #define TG3_SERDES_TIMEOUT_SEC 2
11743 #define TG3_COPPER_TIMEOUT_SEC 6
11745 static int tg3_test_link(struct tg3 *tp)
11749 if (!netif_running(tp->dev))
11757 for (i = 0; i < max; i++) {
11758 if (netif_carrier_ok(tp->dev))
11769 static int tg3_test_registers(struct tg3 *tp)
11771 int i, is_5705, is_5750;
11772 u32 offset, read_mask, write_mask, val, save_val, read_val;
11776 #define TG3_FL_5705 0x1
11777 #define TG3_FL_NOT_5705 0x2
11778 #define TG3_FL_NOT_5788 0x4
11779 #define TG3_FL_NOT_5750 0x8
11785 0x00000000, 0x00ef6f8c },
11787 0x00000000, 0x01ef6b8c },
11789 0x03800107, 0x00000000 },
11791 0x03800100, 0x00000000 },
11793 0x00000000, 0x0000ffff },
11795 0x00000000, 0xffffffff },
11797 0x00000000, 0x0000ffff },
11799 0x00000000, 0x00000070 },
11801 0x00000000, 0x00003fff },
11803 0x00000000, 0x000007fc },
11805 0x00000000, 0x000007dc },
11807 0x00000000, 0xffffffff },
11809 0x00000000, 0xffffffff },
11811 0x00000000, 0xffffffff },
11813 0x00000000, 0xffffffff },
11817 0x00000000, 0xffffffff },
11819 0x00000000, 0xffffffff },
11821 0x00000000, 0x00000003 },
11823 0x00000000, 0xffffffff },
11825 0x00000000, 0xffffffff },
11827 0x00000000, 0xffffffff },
11829 0x00000000, 0xffff0002 },
11831 0x00000000, 0xffffffff },
11835 0x00000000, 0xffffffff },
11837 0x00000000, 0x000003ff },
11839 0x00000000, 0xffffffff },
11843 0x00000000, 0x00000004 },
11845 0x00000000, 0x000000f6 },
11847 0x00000000, 0xffffffff },
11849 0x00000000, 0x000003ff },
11851 0x00000000, 0xffffffff },
11853 0x00000000, 0x000003ff },
11855 0x00000000, 0xffffffff },
11857 0x00000000, 0x000000ff },
11859 0x00000000, 0xffffffff },
11861 0x00000000, 0x000000ff },
11863 0x00000000, 0xffffffff },
11865 0x00000000, 0xffffffff },
11867 0x00000000, 0xffffffff },
11869 0x00000000, 0x000000ff },
11871 0x00000000, 0xffffffff },
11873 0x00000000, 0x000000ff },
11875 0x00000000, 0xffffffff },
11877 0x00000000, 0xffffffff },
11879 0x00000000, 0xffffffff },
11881 0x00000000, 0xffffffff },
11883 0x00000000, 0xffffffff },
11885 0xffffffff, 0x00000000 },
11887 0xffffffff, 0x00000000 },
11891 0x00000000, 0x007fff80 },
11893 0x00000000, 0x007fffff },
11895 0x00000000, 0x0000003f },
11897 0x00000000, 0x000001ff },
11899 0x00000000, 0x000001ff },
11901 0xffffffff, 0x00000000 },
11903 0xffffffff, 0x00000000 },
11907 0x00000000, 0x000001ff },
11909 0x00000000, 0x000001ff },
11911 0x00000000, 0x000007ff },
11913 0x00000000, 0x000001ff },
11915 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11918 is_5705 = is_5750 = 0;
11925 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11926 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11929 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11933 (reg_tbl[i].flags & TG3_FL_NOT_5788))
11936 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11939 offset = (u32) reg_tbl[i].offset;
11940 read_mask = reg_tbl[i].read_mask;
11941 write_mask = reg_tbl[i].write_mask;
11944 save_val = tr32(offset);
11947 read_val = save_val & read_mask;
11954 val = tr32(offset);
11957 if (((val & read_mask) != read_val) || (val & write_mask))
11964 tw32(offset, read_mask | write_mask);
11966 val = tr32(offset);
11969 if ((val & read_mask) != read_val)
11973 if ((val & write_mask) != write_mask)
11976 tw32(offset, save_val);
11983 netdev_err(tp->dev,
11984 "Register test failed at offset %x\n", offset);
11985 tw32(offset, save_val);
11989 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11991 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11995 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11996 for (j = 0; j < len; j += 4) {
11999 tg3_write_mem(tp, offset + j, test_pattern[i]);
12000 tg3_read_mem(tp, offset + j, &val);
12001 if (val != test_pattern[i])
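/*
 * Editor's note: the internal-memory test writes each pattern to every
 * word of the region and reads it back. 0x00000000 and 0xffffffff catch
 * stuck-at bits, while 0xaa55a55a alternates neighboring bits to catch
 * shorts between adjacent cells. The loop body is simply:
 *
 *	tg3_write_mem(tp, offset + j, test_pattern[i]);
 *	tg3_read_mem(tp, offset + j, &val);
 *	if (val != test_pattern[i])
 *		return -EIO;      assumed failure path, not shown above
 */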
12008 static int tg3_test_memory(struct tg3 *tp)
12010 static struct mem_entry {
12013 } mem_tbl_570x[] = {
12014 { 0x00000000, 0x00b50},
12015 { 0x00002000, 0x1c000},
12016 { 0xffffffff, 0x00000}
12017 }, mem_tbl_5705[] = {
12018 { 0x00000100, 0x0000c},
12019 { 0x00000200, 0x00008},
12020 { 0x00004000, 0x00800},
12021 { 0x00006000, 0x01000},
12022 { 0x00008000, 0x02000},
12023 { 0x00010000, 0x0e000},
12024 { 0xffffffff, 0x00000}
12025 }, mem_tbl_5755[] = {
12026 { 0x00000200, 0x00008},
12027 { 0x00004000, 0x00800},
12028 { 0x00006000, 0x00800},
12029 { 0x00008000, 0x02000},
12030 { 0x00010000, 0x0c000},
12031 { 0xffffffff, 0x00000}
12032 }, mem_tbl_5906[] = {
12033 { 0x00000200, 0x00008},
12034 { 0x00004000, 0x00400},
12035 { 0x00006000, 0x00400},
12036 { 0x00008000, 0x01000},
12037 { 0x00010000, 0x01000},
12038 { 0xffffffff, 0x00000}
12039 }, mem_tbl_5717[] = {
12040 { 0x00000200, 0x00008},
12041 { 0x00010000, 0x0a000},
12042 { 0x00020000, 0x13c00},
12043 { 0xffffffff, 0x00000}
12044 }, mem_tbl_57765[] = {
12045 { 0x00000200, 0x00008},
12046 { 0x00004000, 0x00800},
12047 { 0x00006000, 0x09800},
12048 { 0x00010000, 0x0a000},
12049 { 0xffffffff, 0x00000}
12051 struct mem_entry *mem_tbl;
12056 mem_tbl = mem_tbl_5717;
12057 else if (tg3_flag(tp, 57765_CLASS))
12058 mem_tbl = mem_tbl_57765;
12060 mem_tbl = mem_tbl_5755;
12062 mem_tbl = mem_tbl_5906;
12064 mem_tbl = mem_tbl_5705;
12066 mem_tbl = mem_tbl_570x;
12068 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
12069 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
12077 #define TG3_TSO_MSS 500
12079 #define TG3_TSO_IP_HDR_LEN 20
12080 #define TG3_TSO_TCP_HDR_LEN 20
12081 #define TG3_TSO_TCP_OPT_LEN 12
12083 static const u8 tg3_tso_header[] = {
12085 0x45, 0x00, 0x00, 0x00,
12086 0x00, 0x00, 0x40, 0x00,
12087 0x40, 0x06, 0x00, 0x00,
12088 0x0a, 0x00, 0x00, 0x01,
12089 0x0a, 0x00, 0x00, 0x02,
12090 0x0d, 0x00, 0xe0, 0x00,
12091 0x00, 0x00, 0x01, 0x00,
12092 0x00, 0x00, 0x02, 0x00,
12093 0x80, 0x10, 0x10, 0x00,
12094 0x14, 0x09, 0x00, 0x00,
12095 0x01, 0x01, 0x08, 0x0a,
12096 0x11, 0x11, 0x11, 0x11,
12097 0x11, 0x11, 0x11, 0x11,
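/*
 * Editor's note: tg3_tso_header is a canned IPv4+TCP header template for
 * the TSO loopback test. Reading the visible bytes: 0x45 is IPv4 with a
 * 20-byte header, 0x40/0x06 are TTL 64 and protocol TCP, 10.0.0.1 ->
 * 10.0.0.2 are dummy addresses, 0x80 in the TCP header encodes a 32-byte
 * header (20 bytes + 12 option bytes), and the 0x01,0x01,0x08,0x0a run is
 * NOP+NOP+timestamp padding, matching TG3_TSO_IP_HDR_LEN,
 * TG3_TSO_TCP_HDR_LEN and TG3_TSO_TCP_OPT_LEN above.
 */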
12100 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
12102 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
12103 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
12108 int num_pkts, tx_len, rx_len, i, err;
12113 tnapi = &tp->napi[0];
12114 rnapi = &tp->napi[0];
12117 rnapi = &tp->napi[1];
12119 tnapi = &tp->napi[1];
12126 skb = netdev_alloc_skb(tp->dev, tx_len);
12130 tx_data = skb_put(skb, tx_len);
12132 memset(tx_data + 6, 0x0, 8);
12136 if (tso_loopback) {
12143 sizeof(tg3_tso_header));
12146 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
12160 th = (struct tcphdr *)&tx_data[val];
12166 mss |= (hdr_len & 0xc) << 12;
12167 if (hdr_len & 0x10)
12168 base_flags |= 0x00000010;
12169 base_flags |= (hdr_len & 0x3e0) << 5;
12170 } else if (tg3_flag(tp, HW_TSO_2))
12171 mss |= hdr_len << 9;
12172 else if (tg3_flag(tp, HW_TSO_1) ||
12174 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
12176 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
12179 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
12184 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
12189 for (i = data_off; i < tx_len; i++)
12190 tx_data[i] = (u8) (i & 0xff);
12193 if (pci_dma_mapping_error(tp->pdev, map)) {
12194 dev_kfree_skb(skb);
12198 val = tnapi->tx_prod;
12207 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
12209 budget = tg3_tx_avail(tnapi);
12210 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
12213 dev_kfree_skb(skb);
12228 for (i = 0; i < 35; i++) {
12234 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
12235 rx_idx = rnapi->hw_status->idx[0].rx_producer;
12236 if ((tx_idx == tnapi->tx_prod) &&
12237 (rx_idx == (rx_start_idx + num_pkts)))
12241 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
12242 dev_kfree_skb(skb);
12244 if (tx_idx != tnapi->tx_prod)
12247 if (rx_idx != rx_start_idx + num_pkts)
12251 while (rx_idx != rx_start_idx) {
12252 desc = &rnapi->rx_rcb[rx_start_idx++];
12263 if (!tso_loopback) {
12264 if (rx_len != tx_len)
12268 if (opaque_key != RXD_OPAQUE_RING_STD)
12280 if (opaque_key == RXD_OPAQUE_RING_STD) {
12291 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
12295 for (i = data_off; i < rx_len; i++, val++) {
12296 if (*(rx_data + i) != (u8) (val & 0xff))
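/*
 * Editor's note: on transmit every payload byte past the headers was set
 * to a ramp value "(i & 0xff)", so on receive the same ramp must come
 * back; "val" carries the expected sequence across the loop. A single
 * mismatching byte fails the whole loopback test, catching data
 * corruption that the tx_idx/rx_idx ring comparisons above would miss.
 */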
12308 #define TG3_STD_LOOPBACK_FAILED 1
12309 #define TG3_JMB_LOOPBACK_FAILED 2
12310 #define TG3_TSO_LOOPBACK_FAILED 4
12311 #define TG3_LOOPBACK_FAILED \
12312 (TG3_STD_LOOPBACK_FAILED | \
12313 TG3_JMB_LOOPBACK_FAILED | \
12314 TG3_TSO_LOOPBACK_FAILED)
12316 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
12320 u32 jmb_pkt_sz = 9000;
12328 if (!netif_running(tp->dev)) {
12336 err = tg3_reset_hw(tp, 1);
12361 tg3_mac_loopback(tp, true);
12366 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12367 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12370 tg3_mac_loopback(tp, false);
12377 tg3_phy_lpbk_set(tp, 0, false);
12380 for (i = 0; i < 100; i++) {
12391 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12392 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12396 tg3_phy_lpbk_set(tp, 0, true);
12409 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12410 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12416 tg3_phy_toggle_apd(tp, true);
12419 err = (data[0] | data[1] | data[2]) ? -EIO : 0;
12430 struct tg3 *tp = netdev_priv(dev);
12434 tg3_power_up(tp)) {
12442 if (tg3_test_nvram(tp) != 0) {
12446 if (!doextlpbk && tg3_test_link(tp)) {
12451 int err, err2 = 0, irq_sync = 0;
12453 if (netif_running(dev)) {
12455 tg3_netif_stop(tp);
12459 tg3_full_lock(tp, irq_sync);
12462 err = tg3_nvram_lock(tp);
12467 tg3_nvram_unlock(tp);
12472 if (tg3_test_registers(tp) != 0) {
12477 if (tg3_test_memory(tp) != 0) {
12485 if (tg3_test_loopback(tp, &data[4], doextlpbk))
12488 tg3_full_unlock(tp);
12490 if (tg3_test_interrupt(tp) != 0) {
12495 tg3_full_lock(tp, 0);
12498 if (netif_running(dev)) {
12500 err2 = tg3_restart_hw(tp, 1);
12502 tg3_netif_start(tp);
12505 tg3_full_unlock(tp);
12507 if (irq_sync && !err2)
12511 tg3_power_down(tp);
12515 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12518 struct tg3 *tp = netdev_priv(dev);
12540 if (!netif_running(dev))
12543 spin_lock_bh(&tp->lock);
12544 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
12545 spin_unlock_bh(&tp->lock);
12556 if (!netif_running(dev))
12559 spin_lock_bh(&tp->lock);
12560 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
12561 spin_unlock_bh(&tp->lock);
12574 struct tg3 *tp = netdev_priv(dev);
12582 struct tg3 *tp = netdev_priv(dev);
12583 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
12584 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
12626 if (netif_running(dev)) {
12627 tg3_full_lock(tp, 0);
12628 __tg3_set_coalesce(tp, &tp->coal);
12629 tg3_full_unlock(tp);
12634 static const struct ethtool_ops tg3_ethtool_ops = {
12635 .get_settings = tg3_get_settings,
12636 .set_settings = tg3_set_settings,
12637 .get_drvinfo = tg3_get_drvinfo,
12638 .get_regs_len = tg3_get_regs_len,
12639 .get_regs = tg3_get_regs,
12640 .get_wol = tg3_get_wol,
12641 .set_wol = tg3_set_wol,
12642 .get_msglevel = tg3_get_msglevel,
12643 .set_msglevel = tg3_set_msglevel,
12644 .nway_reset = tg3_nway_reset,
12646 .get_eeprom_len = tg3_get_eeprom_len,
12647 .get_eeprom = tg3_get_eeprom,
12648 .set_eeprom = tg3_set_eeprom,
12649 .get_ringparam = tg3_get_ringparam,
12650 .set_ringparam = tg3_set_ringparam,
12651 .get_pauseparam = tg3_get_pauseparam,
12652 .set_pauseparam = tg3_set_pauseparam,
12653 .self_test = tg3_self_test,
12654 .get_strings = tg3_get_strings,
12655 .set_phys_id = tg3_set_phys_id,
12656 .get_ethtool_stats = tg3_get_ethtool_stats,
12657 .get_coalesce = tg3_get_coalesce,
12658 .set_coalesce = tg3_set_coalesce,
12659 .get_sset_count = tg3_get_sset_count,
12660 .get_rxnfc = tg3_get_rxnfc,
12661 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
12662 .get_rxfh_indir = tg3_get_rxfh_indir,
12663 .set_rxfh_indir = tg3_set_rxfh_indir,
12664 .get_channels = tg3_get_channels,
12665 .set_channels = tg3_set_channels,
12672 struct tg3 *tp = netdev_priv(dev);
12674 spin_lock_bh(&tp->lock);
12676 spin_unlock_bh(&tp->lock);
12680 tg3_get_nstats(tp, stats);
12681 spin_unlock_bh(&tp->lock);
12686 static void tg3_set_rx_mode(struct net_device *dev)
12688 struct tg3 *tp = netdev_priv(dev);
12690 if (!netif_running(dev))
12693 tg3_full_lock(tp, 0);
12694 __tg3_set_rx_mode(dev);
12695 tg3_full_unlock(tp);
12698 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
12701 dev->mtu = new_mtu;
12719 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
12721 struct tg3 *tp = netdev_priv(dev);
12722 int err, reset_phy = 0;
12724 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
12727 if (!netif_running(dev)) {
12731 tg3_set_mtu(dev, tp, new_mtu);
12737 tg3_netif_stop(tp);
12739 tg3_full_lock(tp, 1);
12743 tg3_set_mtu(dev, tp, new_mtu);
12751 err = tg3_restart_hw(tp, reset_phy);
12754 tg3_netif_start(tp);
12756 tg3_full_unlock(tp);
12765 .ndo_open = tg3_open,
12766 .ndo_stop = tg3_close,
12767 .ndo_start_xmit = tg3_start_xmit,
12768 .ndo_get_stats64 = tg3_get_stats64,
12770 .ndo_set_rx_mode = tg3_set_rx_mode,
12771 .ndo_set_mac_address = tg3_set_mac_addr,
12772 .ndo_do_ioctl = tg3_ioctl,
12773 .ndo_tx_timeout = tg3_tx_timeout,
12774 .ndo_change_mtu = tg3_change_mtu,
12775 .ndo_fix_features = tg3_fix_features,
12776 .ndo_set_features = tg3_set_features,
12777 #ifdef CONFIG_NET_POLL_CONTROLLER
12778 .ndo_poll_controller = tg3_poll_controller,
12782 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
12788 if (tg3_nvram_read(tp, 0, &magic) != 0)
12803 while (cursize < tp->nvram_size) {
12804 if (tg3_nvram_read(tp, cursize, &val) != 0)
12816 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
12820 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
12825 tg3_get_eeprom_size(tp);
12829 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
12849 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12900 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12927 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12934 if (nvcfg1 & (1 << 27))
12958 tg3_nvram_get_pagesize(tp, nvcfg1);
12968 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12975 if (nvcfg1 & (1 << 27)) {
13024 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
13030 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13062 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
13064 u32 nvcfg1, protect = 0;
13069 if (nvcfg1 & (1 << 27)) {
13137 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
13144 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
13150 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13171 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13194 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13211 tg3_nvram_get_pagesize(tp, nvcfg1);
13217 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
13223 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13244 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13271 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13290 tg3_nvram_get_pagesize(tp, nvcfg1);
13295 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
13297 u32 nvcfg1, nvmpinstrp;
13302 switch (nvmpinstrp) {
13331 switch (nvmpinstrp) {
13373 switch (nvmpinstrp) {
13402 tg3_nvram_get_pagesize(tp, nvcfg1);
13426 if (tg3_nvram_lock(tp)) {
13427 netdev_warn(tp->dev,
13428 "Cannot get nvram lock, %s failed\n",
13432 tg3_enable_nvram_access(tp);
13437 tg3_get_5752_nvram_info(tp);
13439 tg3_get_5755_nvram_info(tp);
13443 tg3_get_5787_nvram_info(tp);
13445 tg3_get_5761_nvram_info(tp);
13447 tg3_get_5906_nvram_info(tp);
13450 tg3_get_57780_nvram_info(tp);
13453 tg3_get_5717_nvram_info(tp);
13455 tg3_get_5720_nvram_info(tp);
13457 tg3_get_nvram_info(tp);
13460 tg3_get_nvram_size(tp);
13462 tg3_disable_nvram_access(tp);
13463 tg3_nvram_unlock(tp);
13469 tg3_get_eeprom_size(tp);
13546 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13548 tp->pdev->subsystem_vendor) &&
13549 (subsys_id_to_phy_id[i].subsys_devid ==
13550 tp->pdev->subsystem_device))
13551 return &subsys_id_to_phy_id[i];
13556 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
13585 u32 nic_cfg, led_cfg;
13586 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
13587 int eeprom_phy_serdes = 0;
13597 (ver > 0) && (ver < 0x100))
13605 eeprom_phy_serdes = 1;
13608 if (nic_phy_id != 0) {
13612 eeprom_phy_id = (id1 >> 16) << 10;
13613 eeprom_phy_id |= (id2 & 0xfc00) << 16;
13614 eeprom_phy_id |= (id2 & 0x03ff) << 0;
13618 tp->phy_id = eeprom_phy_id;
13619 if (eeprom_phy_serdes) {
13687 if ((tp->pdev->subsystem_vendor ==
13689 (tp->pdev->subsystem_device == 0x205a ||
13690 tp->pdev->subsystem_device == 0x2063))
13697 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13717 if (cfg2 & (1 << 17))
13722 if (cfg2 & (1 << 18))
13756 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13765 for (i = 0; i < 100; i++) {
13779 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13781 u32 bhalf_otp, thalf_otp;
13802 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13805 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13815 ADVERTISED_100baseT_Full |
13834 u32 hw_phy_id_1, hw_phy_id_2;
13835 u32 hw_phy_id, hw_phy_id_masked;
13860 return tg3_phy_init(tp);
13874 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13875 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13877 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
13878 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13879 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
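/*
 * Editor's note: MII_PHYSID1/MII_PHYSID2 hold the PHY's 32-bit identifier
 * (OUI, model, revision). The shifts above repack the two raw registers
 * into the driver's internal PHY-ID layout rather than the plain
 * (id1 << 16) | id2 concatenation; the NVRAM-supplied ID in
 * tg3_get_eeprom_hw_cfg() is packed the same way, so the two sources can
 * be compared directly.
 */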
13901 p = tg3_lookup_by_subsys(tp);
13921 tg3_phy_init_link_config(tp);
13929 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13930 (bmsr & BMSR_LSTATUS))
13931 goto skip_phy_reset;
13933 err = tg3_phy_reset(tp);
13937 tg3_phy_set_wirespeed(tp);
13939 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
13940 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13950 err = tg3_init_5401phy_dsp(tp);
13954 err = tg3_init_5401phy_dsp(tp);
13963 unsigned int block_end, rosize, len;
13967 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13973 goto out_not_found;
13975 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13979 if (block_end > vpdlen)
13980 goto out_not_found;
13985 len = pci_vpd_info_field_size(&vpd_data[j]);
13988 if (j + len > block_end || len != 4 ||
13989 memcmp(&vpd_data[j], "1028", 4))
13997 len = pci_vpd_info_field_size(&vpd_data[j]);
14000 if (j + len > block_end)
14011 goto out_not_found;
14013 len = pci_vpd_info_field_size(&vpd_data[i]);
14017 (len + i) > vpdlen)
14018 goto out_not_found;
14080 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
14084 if (tg3_nvram_read(tp, offset, &val) ||
14085 (val & 0xfc000000) != 0x0c000000 ||
14086 tg3_nvram_read(tp, offset + 4, &val) ||
14097 bool newver = false;
14099 if (tg3_nvram_read(tp, 0xc, &offset) ||
14100 tg3_nvram_read(tp, 0x4, &start))
14103 offset = tg3_nvram_logical_addr(tp, offset);
14105 if (tg3_nvram_read(tp, offset, &val))
14108 if ((val & 0xfc000000) == 0x0c000000) {
14109 if (tg3_nvram_read(tp, offset + 4, &val))
14120 tg3_nvram_read(tp, offset + 8, &ver_offset))
14123 offset = offset + ver_offset - start;
14124 for (i = 0; i < 16; i += 4) {
14126 if (tg3_nvram_read_be32(tp, offset + i, &v))
14141 "v%d.%02d", major, minor);
14145 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
14147 u32 val, major, minor;
14161 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
14163 u32 offset, major, minor, build;
14193 if (tg3_nvram_read(tp, offset, &val))
14202 if (minor > 99 || build > 26)
14207 " v%d.%02d", major, minor);
14216 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
14224 if (tg3_nvram_read(tp, offset, &val))
14231 if (offset == TG3_NVM_DIR_END)
14235 start = 0x08000000;
14236 else if (tg3_nvram_read(tp, offset - 4, &start))
14239 if (tg3_nvram_read(tp, offset + 4, &offset) ||
14240 !tg3_fw_img_is_valid(tp, offset) ||
14241 tg3_nvram_read(tp, offset + 8, &val))
14244 offset += val - start;
14248 tp->fw_ver[vlen++] = ',';
14249 tp->fw_ver[vlen++] = ' ';
14251 for (i = 0; i < 4; i++) {
14253 if (tg3_nvram_read_be32(tp, offset, &v))
14256 offset += sizeof(v);
14277 if (!(apedata & APE_FW_STATUS_READY))
14284 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
14310 bool vpd_vers = false;
14320 if (tg3_nvram_read(tp, 0, &val))
14324 tg3_read_bc_ver(tp);
14326 tg3_read_sb_ver(tp, val);
14328 tg3_read_hwsb_ver(tp);
14332 tg3_probe_ncsi(tp);
14334 tg3_read_dash_ver(tp);
14335 } else if (!vpd_vers) {
14336 tg3_read_mgmtfw_ver(tp);
14343 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
14345 if (tg3_flag(tp, LRG_PROD_RING_CAP))
14363 unsigned int func, devnr = tp->pdev->devfn & ~7;
14365 for (func = 0; func < 8; func++) {
14367 if (peer && peer != tp->pdev)
14388 static void __devinit tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
14465 static int __devinit tg3_get_invariants(struct tg3 *tp)
14468 u32 pci_state_reg, grc_misc_cfg;
14496 tg3_detect_asic_rev(tp, misc_ctrl_reg);
14517 static struct tg3_dev_id {
14521 } ich_chipsets[] = {
14532 struct tg3_dev_id *pci_id = &ich_chipsets[0];
14535 while (pci_id->vendor != 0) {
14543 if (bridge->revision > pci_id->rev)
14548 tp->pdev->bus->number)) {
14557 static struct tg3_dev_id {
14560 } bridge_chipsets[] = {
14565 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
14568 while (pci_id->vendor != 0) {
14578 tp->pdev->bus->number) &&
14580 tp->pdev->bus->number)) {
14606 tp->pdev->bus->number) &&
14608 tp->pdev->bus->number)) {
14623 else if (tg3_flag(tp, 57765_PLUS))
14625 else if (tg3_flag(tp, 5755_PLUS) ||
14628 else if (tg3_flag(tp, 5750_PLUS)) {
14720 if (pci_is_pcie(tp->pdev)) {
14746 } else if (!tg3_flag(tp, 5705_PLUS) ||
14751 "Cannot find PCI-X capability, aborting\n");
14803 pci_read_config_dword(tp->pdev,
14808 pci_write_config_dword(tp->pdev,
14832 tp->read32 = tg3_read32;
14840 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14841 tp->write32 = tg3_write_indirect_reg32;
14852 tp->write32 = tg3_write_flush_reg32;
14857 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14861 if (tg3_flag(tp, ICH_WORKAROUND)) {
14862 tp->read32 = tg3_read_indirect_reg32;
14863 tp->write32 = tg3_write_indirect_reg32;
14883 if (tp->write32 == tg3_write_indirect_reg32 ||
14901 pci_read_config_dword(tp->pdev,
14931 tg3_get_eeprom_hw_cfg(tp);
14949 tg3_ape_lock_init(tp);
14984 tg3_pwrsrc_switch_to_vmain(tp);
15040 tp->phy_otp = tg3_read_otp_phycfg(tp);
15067 err = tg3_mdio_init(tp);
15084 tg3_switch_clocks(tp);
15092 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
15107 writel(0x00000000, sram_base);
15108 writel(0x00000000, sram_base + 4);
15109 writel(0xffffffff, sram_base + 4);
15110 if (readl(sram_base) != 0x00000000)
15116 tg3_nvram_init(tp);
15129 if (tg3_flag(tp, TAGGED_STATUS)) {
15146 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15162 err = tg3_phy_probe(tp);
15164 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
15170 tg3_read_fw_ver(tp);
15212 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
15231 if (tg3_flag(tp, ASPM_WORKAROUND))
15238 #ifdef CONFIG_SPARC
15239 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
15243 struct device_node *dp = pci_device_to_OF_node(pdev);
15244 const unsigned char *addr;
15248 if (addr && len == 6) {
15256 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
15266 static int __devinit tg3_get_device_address(struct tg3 *tp)
15269 u32 hi, lo, mac_offset;
15272 #ifdef CONFIG_SPARC
15273 if (!tg3_get_macaddr_sparc(tp))
15282 if (tg3_nvram_lock(tp))
15285 tg3_nvram_unlock(tp);
15286 } else if (tg3_flag(tp, 5717_PLUS)) {
15290 mac_offset += 0x18c;
15296 if ((hi >> 16) == 0x484b) {
15297 dev->dev_addr[0] = (hi >> 8) & 0xff;
15298 dev->dev_addr[1] = (hi >> 0) & 0xff;
15301 dev->dev_addr[2] = (lo >> 24) & 0xff;
15302 dev->dev_addr[3] = (lo >> 16) & 0xff;
15303 dev->dev_addr[4] = (lo >> 8) & 0xff;
15304 dev->dev_addr[5] = (lo >> 0) & 0xff;
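/*
 * Editor's note: the 0x484b signature is ASCII "HK" in the top half of
 * the high word, marking a valid MAC-address block; the six address
 * bytes are then peeled out of the hi/lo words MSB-first. Worked
 * example of the shifts above:
 *
 *	hi = 0x484b0a0b, lo = 0x0c0d0e0f
 *	=> dev_addr = 0a:0b:0c:0d:0e:0f
 */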
15307 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
15312 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
15313 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
15323 dev->dev_addr[4] = (lo >> 8) & 0xff;
15324 dev->dev_addr[3] = (lo >> 16) & 0xff;
15325 dev->dev_addr[2] = (lo >> 24) & 0xff;
15327 dev->dev_addr[0] = (hi >> 8) & 0xff;
15331 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
15332 #ifdef CONFIG_SPARC
15333 if (!tg3_get_default_macaddr_sparc(tp))
15332 #ifdef CONFIG_SPARC
15333 if (!tg3_get_default_macaddr_sparc(tp))
15342 #define BOUNDARY_SINGLE_CACHELINE 1
15343 #define BOUNDARY_MULTI_CACHELINE 2
15345 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
15347 int cacheline_size;
15353 cacheline_size = 1024;
15355 cacheline_size = (int) byte * 4;
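/*
 * Editor's note: PCI_CACHE_LINE_SIZE is specified in 32-bit dwords,
 * hence the "* 4" to convert to bytes; a zero value (no cacheline info)
 * falls back to 1024 above. Sketch of the read using the standard
 * config-space accessor:
 *
 *	u8 byte;
 *	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
 *	cacheline_size = byte ? byte * 4 : 1024;
 */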
15365 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
15368 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
15395 switch (cacheline_size) {
15419 } else if (tg3_flag(tp, PCI_EXPRESS)) {
15420 switch (cacheline_size) {
15437 switch (cacheline_size) {
15489 u32 sram_dma_descs;
15502 test_desc.addr_hi = ((u64) buf_dma) >> 32;
15503 test_desc.addr_lo = buf_dma & 0xffffffff;
15504 test_desc.nic_mbuf = 0x00002100;
15505 test_desc.len = size;
15520 test_desc.cqid_sqid = (13 << 8) | 2;
15525 test_desc.cqid_sqid = (16 << 8) | 7;
15530 test_desc.flags = 0x00000005;
15532 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
15535 val = *(((u32 *)&test_desc) + i);
15537 sram_dma_descs + (i * sizeof(u32)));
15548 for (i = 0; i < 40; i++) {
15555 if ((val & 0xffff) == sram_dma_descs) {
15566 #define TEST_BUFFER_SIZE 0x2000
15576 u32 *buf, saved_dma_rwctrl;
15597 } else if (!tg3_flag(tp, PCIX_MODE)) {
15607 u32 read_water = 0x7;
15613 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
15616 else if (ccval == 0x6 || ccval == 0x7)
15663 tg3_switch_clocks(tp);
15687 "%s: Buffer write failed. err = %d\n",
15696 tg3_read_mem(tp, 0x2100 + (i*4), &val);
15699 "%s: Buffer corrupted on device! "
15700 "(%d != %d)\n", __func__, val, i);
15709 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
15710 "err = %d\n", __func__, ret);
15727 "%s: Buffer corrupted on read back! "
15728 "(%d != %d)\n", __func__, p[i], i);
15763 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15779 } else if (tg3_flag(tp, 5705_PLUS)) {
15819 static char * __devinit tg3_phy_string(struct tg3 *tp)
15845 case 0: return "serdes";
15846 default: return "unknown";
15853 strcpy(str, "PCI Express");
15855 } else if (tg3_flag(tp, PCIX_MODE)) {
15860 if ((clock_ctrl == 7) ||
15864 else if (clock_ctrl == 0)
15866 else if (clock_ctrl == 2)
15868 else if (clock_ctrl == 4)
15870 else if (clock_ctrl == 6)
15890 memset(ec, 0, sizeof(*ec));
15923 u32 sndmbx, rcvmbx, intmbx;
15925 u64 dma_mask, persist_dma_mask;
15932 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15938 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15939 goto err_out_disable_pdev;
15948 "Cannot find Power Management capability, aborting\n");
15950 goto err_out_free_res;
15955 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15956 goto err_out_free_res;
15962 goto err_out_power_down;
15967 tp = netdev_priv(dev);
15984 MISC_HOST_CTRL_MASK_PCI_INT |
15997 #ifdef __BIG_ENDIAN
16006 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
16008 goto err_out_free_dev;
16023 "Cannot map APE registers, aborting\n");
16025 goto err_out_iounmap;
16037 err = tg3_get_invariants(tp);
16040 "Problem fetching invariants of chip, aborting\n");
16041 goto err_out_apeunmap;
16052 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
16054 #ifdef CONFIG_HIGHMEM
16062 err = pci_set_dma_mask(pdev, dma_mask);
16065 err = pci_set_consistent_dma_mask(pdev,
16068 dev_err(&pdev->dev, "Unable to obtain 64 bit "
16069 "DMA for consistent allocations\n");
16070 goto err_out_apeunmap;
16078 "No usable DMA configuration, aborting\n");
16079 goto err_out_apeunmap;
16083 tg3_init_bufmgr_config(tp);
16140 err = tg3_get_device_address(tp);
16143 "Could not obtain valid ethernet address, aborting\n");
16144 goto err_out_apeunmap;
16158 err = tg3_test_dma(tp);
16160 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
16161 goto err_out_apeunmap;
16167 for (i = 0; i < tp->irq_max; i++) {
16179 tnapi->consmbox = rcvmbx;
16210 pci_set_drvdata(pdev, dev);
16214 tg3_frob_aux_power(tp, false);
16217 tg3_timer_init(tp);
16221 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
16222 goto err_out_apeunmap;
16225 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
16228 tg3_bus_string(tp, str),
16235 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
16236 phydev->drv->name, dev_name(&phydev->dev));
16241 ethtype = "10/100Base-TX";
16243 ethtype = "1000Base-SX";
16245 ethtype = "10/100/1000Base-T";
16247 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
16248 "(WireSpeed[%d], EEE[%d])\n",
16249 tg3_phy_string(tp), ethtype,
16254 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
16256 tg3_flag(tp, USE_LINKCHG_REG) != 0,
16260 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
16284 err_out_power_down:
16290 err_out_disable_pdev:
16292 pci_set_drvdata(pdev, NULL);
16298 struct net_device *dev = pci_get_drvdata(pdev);
16301 struct tg3 *tp = netdev_priv(dev);
16305 tg3_reset_task_cancel(tp);
16324 pci_set_drvdata(pdev, NULL);
16328 #ifdef CONFIG_PM_SLEEP
16332 struct net_device *dev = pci_get_drvdata(pdev);
16333 struct tg3 *tp = netdev_priv(dev);
16336 if (!netif_running(dev))
16339 tg3_reset_task_cancel(tp);
16341 tg3_netif_stop(tp);
16343 tg3_timer_stop(tp);
16345 tg3_full_lock(tp, 1);
16346 tg3_disable_ints(tp);
16347 tg3_full_unlock(tp);
16351 tg3_full_lock(tp, 0);
16354 tg3_full_unlock(tp);
16356 err = tg3_power_down_prepare(tp);
16360 tg3_full_lock(tp, 0);
16363 err2 = tg3_restart_hw(tp, 1);
16367 tg3_timer_start(tp);
16370 tg3_netif_start(tp);
16373 tg3_full_unlock(tp);
16382 static int tg3_resume(struct device *device)
16385 struct net_device *dev = pci_get_drvdata(pdev);
16386 struct tg3 *tp = netdev_priv(dev);
16389 if (!netif_running(dev))
16394 tg3_full_lock(tp, 0);
16397 err = tg3_restart_hw(tp, 1);
16401 tg3_timer_start(tp);
16403 tg3_netif_start(tp);
16406 tg3_full_unlock(tp);
16415 #define TG3_PM_OPS (&tg3_pm_ops)
16419 #define TG3_PM_OPS NULL
16434 struct net_device *netdev = pci_get_drvdata(pdev);
16435 struct tg3 *tp = netdev_priv(netdev);
16438 netdev_info(netdev, "PCI I/O error detected\n");
16442 if (!netif_running(netdev))
16447 tg3_netif_stop(tp);
16449 tg3_timer_stop(tp);
16452 tg3_reset_task_cancel(tp);
16457 tg3_full_lock(tp, 0);
16459 tg3_full_unlock(tp);
16483 struct net_device *netdev = pci_get_drvdata(pdev);
16484 struct tg3 *tp = netdev_priv(netdev);
16491 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
16499 if (!netif_running(netdev)) {
16504 err = tg3_power_up(tp);
16523 static void tg3_io_resume(struct pci_dev *pdev)
16525 struct net_device *netdev = pci_get_drvdata(pdev);
16526 struct tg3 *tp = netdev_priv(netdev);
16531 if (!netif_running(netdev))
16534 tg3_full_lock(tp, 0);
16536 err = tg3_restart_hw(tp, 1);
16537 tg3_full_unlock(tp);
16539 netdev_err(netdev, "Cannot restart hardware after reset.\n");
16545 tg3_timer_start(tp);
16547 tg3_netif_start(tp);
16556 .error_detected = tg3_io_error_detected,
16557 .slot_reset = tg3_io_slot_reset,
16558 .resume = tg3_io_resume
16563 .id_table = tg3_pci_tbl,
16564 .probe = tg3_init_one,
16566 .err_handler = &tg3_err_handler,
16570 static int __init tg3_init(void)
16572 return pci_register_driver(&tg3_driver);
16575 static void __exit tg3_cleanup(void)